Compare commits


12 Commits

Author SHA1 Message Date
Jacopo Secchiero
ce0d111d7c update docker-ce to 18.09.7 (#4973) (#5162) 2019-10-16 04:29:41 -07:00
Maxime Guyot
6ed833ec33 Update to Kube v1.14.6 (#5098) 2019-08-22 00:54:31 -07:00
Vitaliy Dmitriev
f61dbb74bf [contrib/heketi]: tear down additions and fixes. Heketi updated to version 9 (#5026)
* lvm packages removal during tear down skipped by default
  * lvm utils execution PATH fixed for CentOS/RH
  * Heketi updated to the latest version 9

Signed-off-by: Vitaliy Dmitriev <vi7alya@gmail.com>
2019-08-06 03:31:54 -07:00
nautikos1235
58126de3d9 Fix certificate-key param for kubeadm init (#4789) (#4988)
* Fix certificate-key param for kubeadm init

* Fix yamllint error
2019-07-19 09:33:12 -07:00
Vitaliy Dmitriev
2b69befb59 kubeadm join path fixed for RH linux (#4987)
Signed-off-by: Vitaliy Dmitriev <vi7alya@gmail.com>
2019-07-19 05:59:13 -07:00
Andreas Holmsten
7d8da8348e Cherry pick #4857 #4859 #4867 into release-2.10 (#4877)
* fix start CoreDNS when init secondary master (#4867)

* Update dns-autoscaler.yml.j2 (#4857)

Merge two tolerations, because the latest toleration overrides the first one.

* Remove GCE tests as CNCF funding ended (#4859)
2019-06-13 05:22:17 -07:00
Andreas Holmsten
b90b1fc2b9 updated pinning to prevent breaking changes (#4783) (#4873)
* updated ansible pinning to prevent more possibilities of breaking changes

* more exact pinning of ansible version

* more exact pinning of ansible version and also all the rest

* added testing requirements.txt pinning settings

* removed boto from testing requirements.txt
2019-06-13 02:36:19 -07:00
Andreas Holmsten
147ea54374 Cherry pick #4861 into release-2.10 (#4874)
* Rebase only on PRs (#4861)

* Rebase from release-2.10 branch instead of master
2019-06-12 23:06:13 -07:00
Bort Verwilst
d53782a7f1 k8s 1.14.3 (#4855) 2019-06-09 03:41:05 -07:00
Bort Verwilst
e2f5a9748e upgrade to 1.14.2 (#4782)
* upgrade to 1.14.2

* Remove trailing whitespace
2019-05-20 06:01:15 -07:00
Andreas Krüger
0d1a34ee6b Merge pull request #4718 from lystor/bug-4695
Fix adding output of kubeadm to the admin.conf downloaded to the arti…
2019-05-08 14:52:19 +02:00
lystor
28ad0e676d Fix adding output of kubeadm to the admin.conf downloaded to the artifacts directory (#4696)
Fixes issue https://github.com/kubernetes-sigs/kubespray/issues/4695
2019-05-07 12:55:54 +03:00
398 changed files with 3209 additions and 6471 deletions

View File

@@ -3,23 +3,14 @@ parseable: true
skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# The following rules throw errors.
# These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
# These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
- '204'
- '206'
- '301'
- '305'
- '306'
- '404'
- '502'
- '503'
# These rules are intentionally skipped:
#
# [E204]: "Lines should be no longer than 160 chars"
# This could be re-enabled with a major rewrite in the future.
# For now, there's not enough value gain from strictly limiting line length.
# (Disabled in May 2019)
- '204'
# [E701]: "meta/main.yml should contain relevant info"
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
# While it can be useful to have these metadata available, they are also available in the existing documentation.
# (Disabled in May 2019)
- '504'
- '701'

View File

@@ -1,8 +1,8 @@
---
stages:
- unit-tests
- deploy-part1
- moderator
- deploy-part1
- deploy-part2
- deploy-gce
- deploy-special
@@ -37,7 +37,7 @@ before_script:
tags:
- packet
variables:
KUBESPRAY_VERSION: v2.10.0
KUBESPRAY_VERSION: v2.9.0
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
.testcases: &testcases
@@ -60,8 +60,6 @@ ci-authorized:
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
# Disable ci moderator
only: []
include:
- .gitlab-ci/lint.yml

View File

@@ -20,8 +20,6 @@
<<: *gce_variables
tags:
- gce
except: ['triggers']
only: [/^pr-.*$/]
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
@@ -38,6 +36,8 @@ gce_ubuntu18-flannel-aio:
stage: deploy-part1
<<: *gce
when: manual
except: ['triggers']
only: [/^pr-.*$/]
### PR JOBS PART2
@@ -45,11 +45,15 @@ gce_coreos-calico-aio:
stage: deploy-gce
<<: *gce
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
gce_centos7-flannel-addons:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: [/^pr-.*$/]
### MANUAL JOBS
@@ -60,42 +64,36 @@ gce_centos-weave-kubeadm-sep:
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
except: []
gce_ubuntu-weave-sep:
stage: deploy-gce
<<: *gce
when: manual
only: ['triggers']
except: []
gce_coreos-calico-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-canal-ha-triggers:
stage: deploy-special
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-flannel-addons-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-weave-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
# More builds for PRs/merges (manual) and triggers (auto)
@@ -104,23 +102,27 @@ gce_ubuntu-canal-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-flannel-ha:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
gce_centos-weave-kubeadm-triggers:
stage: deploy-gce
@@ -129,87 +131,99 @@ gce_centos-weave-kubeadm-triggers:
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
except: []
gce_ubuntu-contiv-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-cilium:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu18-cilium-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_debian9-calico-upgrade:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_debian9-calico-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_coreos-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-canal-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_rhel7-canal-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-canal-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-calico-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-calico-ha-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-kube-router:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-multus-calico:
stage: deploy-gce
@@ -217,11 +231,6 @@ gce_centos7-multus-calico:
variables:
<<: *centos7_multus_calico_variables
when: manual
gce_oracle-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
@@ -229,19 +238,27 @@ gce_opensuse-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-kube-router:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-kube-router-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

View File

@@ -6,15 +6,6 @@ yamllint:
- yamllint --strict .
except: ['triggers', 'master']
vagrant-validate:
extends: .job
stage: unit-tests
script:
- curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
- dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
- vagrant validate --ignore-provider
except: ['triggers', 'master']
ansible-lint:
extends: .job
stage: unit-tests

View File

@@ -9,8 +9,6 @@
<<: *packet_variables
tags:
- packet
only: [/^pr-.*$/]
except: ['triggers']
.test-upgrade: &test-upgrade
variables:
@@ -20,6 +18,8 @@ packet_ubuntu18-calico-aio:
stage: deploy-part1
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
# ### PR JOBS PART2
@@ -27,6 +27,8 @@ packet_centos7-flannel-addons:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
# ### MANUAL JOBS
@@ -35,14 +37,12 @@ packet_centos-weave-kubeadm-sep:
<<: *packet
when: on_success
only: ['triggers']
except: []
packet_ubuntu-weave-sep:
stage: deploy-part2
<<: *packet
when: manual
only: ['triggers']
except: []
# # More builds for PRs/merges (manual) and triggers (auto)
@@ -50,73 +50,74 @@ packet_ubuntu-canal-ha:
stage: deploy-special
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-canal-kubeadm:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-flannel-ha:
stage: deploy-part2
<<: *packet
when: manual
when: on_success
except: ['triggers']
packet_ubuntu-contiv-sep:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: on_success
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu18-cilium-sep:
stage: deploy-special
<<: *packet
when: manual
packet_ubuntu18-flannel-containerd:
stage: deploy-part2
<<: *packet
when: manual
packet_debian9-macvlan-sep:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_debian9-calico-upgrade:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-calico-ha:
stage: deploy-part2
<<: *packet
when: manual
packet_centos7-kube-ovn:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-kube-router:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: on_success
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-multus-calico:
stage: deploy-part2
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_opensuse-canal:
stage: deploy-part2
<<: *packet
when: manual
packet_oracle-7-canal:
stage: deploy-part2
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-kube-router-sep:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

View File

@@ -4,51 +4,49 @@
extends: .job
before_script:
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Install Terraform
- apt-get install -y unzip
- curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
- unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
# Prepare inventory
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
- ln -s contrib/terraform/$PROVIDER/hosts
- terraform init contrib/terraform/$PROVIDER
- cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
- cd inventory/$CLUSTER
- ln -s ../../contrib/terraform/$PROVIDER/hosts
- terraform init ../../contrib/terraform/$PROVIDER
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
only: ['master', /^pr-.*$/]
.terraform_validate:
extends: .terraform_install
stage: unit-tests
only: ['master', /^pr-.*$/]
script:
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
- terraform fmt -check -diff contrib/terraform/$PROVIDER
- terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
- terraform fmt -check -diff ../../contrib/terraform/$PROVIDER
.terraform_apply:
extends: .terraform_install
stage: deploy-part2
when: manual
only: [/^pr-.*$/]
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
- tests/scripts/testcases_run.sh
- terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
- ansible-playbook -i hosts ../../cluster.yml --become
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
- cd inventory/$CLUSTER
- terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER
tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: 0.12.6
TF_VERSION: 0.11.11
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
@@ -72,6 +70,7 @@ tf-packet-ubuntu16-default:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_plan_k8s_masters: t1.small.x86
@@ -86,6 +85,7 @@ tf-packet-ubuntu18-default:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_plan_k8s_masters: t1.small.x86
@@ -105,16 +105,15 @@ tf-packet-ubuntu18-default:
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
tf-ovh_ubuntu18-calico:
tf-apply-ovh:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.6
TF_VERSION: 0.11.11
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -132,31 +131,3 @@ tf-ovh_ubuntu18-calico:
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_coreos-calico:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.6
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: core
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_image: "CoreOS Stable"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

View File

@@ -4,8 +4,8 @@ RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync
libssl-dev python-dev sshpass apt-transport-https jq \
ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
@@ -13,6 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
stable" \
&& apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl \
&& chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

View File

@@ -18,4 +18,3 @@ aliases:
- chapsuk
- mirwan
- miouge1
- holmsten

View File

@@ -36,9 +36,9 @@ To deploy the cluster you can use :
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# The option `-b` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# Without -b the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
@@ -101,7 +101,6 @@ Supported Linux Distributions
- **Fedora** 28
- **Fedora/CentOS** Atomic
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7
Note: Upstart/SysV init based OS types are not supported.
@@ -109,33 +108,32 @@ Supported Components
--------------------
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
- [etcd](https://github.com/coreos/etcd) v3.3.10
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.6
- [etcd](https://github.com/coreos/etcd) v3.2.26
- [docker](https://www.docker.com/) v18.06 (see note)
- [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
- [calico](https://github.com/projectcalico/calico) v3.7.3
- [calico](https://github.com/projectcalico/calico) v3.4.0
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5
- [cilium](https://github.com/cilium/cilium) v1.3.0
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
- [multus](https://github.com/intel/multus-cni) v3.2.1
- [weave](https://github.com/weaveworks/weave) v2.5.2
- [multus](https://github.com/intel/multus-cni) v3.1.autoconf
- [weave](https://github.com/weaveworks/weave) v2.5.1
- Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
- [coredns](https://github.com/coredns/coredns) v1.6.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1
- [coredns](https://github.com/coredns/coredns) v1.5.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
Requirements
------------
- **Minimum required version of Kubernetes is v1.14**
- **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
- **Ansible v2.7.8 (or newer) and python-netaddr is installed on the machine
that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -158,7 +156,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
Network Plugins
---------------
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
@@ -172,17 +170,13 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
iptables for network policies, and BGP for pods L3 networking (optionally with BGP peering with out-of-cluster BGP peers).
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
The choice is defined with the variable `kube_network_plugin`. There is also an

Vagrantfile (vendored)
View File

@@ -21,11 +21,10 @@ SUPPORTED_OS = {
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
"fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
}
# Defaults for config options defined in CONFIG
@@ -181,17 +180,9 @@ Vagrant.configure("2") do |config|
"flannel_interface": "eth1",
"kube_network_plugin": $network_plugin,
"kube_network_plugin_multus": $multi_networking,
"download_run_once": "True",
"download_localhost": "False",
"download_cache_dir": ENV['HOME'] + "/kubespray_cache",
# Make kubespray cache even when download_run_once is false
"download_force_cache": "True",
# Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
"download_keep_remote_cache": "False",
"docker_keepcache": "1",
# These two settings will put kubectl and admin.config in $inventory/artifacts
"kubeconfig_localhost": "True",
"kubectl_localhost": "True",
"download_run_once": "False",
"download_localhost": "False",
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user]

View File

@@ -4,8 +4,6 @@ ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore
host_key_checking=False
gathering = smart

View File

@@ -19,14 +19,27 @@
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
roles:
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: false
pre_tasks:
- name: gather facts from all instances
setup:
delegate_to: "{{item}}"
delegate_facts: true
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
run_once: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
@@ -39,23 +52,13 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: not etcd_kubeadm_enabled| default(false)
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when: not etcd_kubeadm_enabled| default(false)
- { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -79,12 +82,6 @@
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}
- hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
@@ -101,6 +98,12 @@
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:

View File

@@ -42,11 +42,8 @@ class SearchEC2Tags(object):
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
cluster_name = os.getenv('CLUSTER_NAME')
if cluster_name:
filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
instances = ec2.instances.filter(Filters=filters)
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
##Suppose default vpc_visibility is private

View File

@@ -4,11 +4,8 @@
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd
- name: Set vm_list
set_fact:
- set_fact:
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -8,22 +8,9 @@
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd
- name: Set VM IP, roles lists and load balancer public IP
set_fact:
- set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
lb_pubip: "{{ lb_pubip_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -1,8 +0,0 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
address: {{ lb_pubip.ipAddress }}
port: 6443
## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false

View File

@@ -29,7 +29,7 @@ sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
imageReference:
publisher: "OpenLogic"
offer: "CentOS"
sku: "7.5"
sku: "7.2"
version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"

View File

@@ -1,18 +1,10 @@
---
- name: Set base_dir
set_fact:
- set_fact:
base_dir: "{{playbook_dir}}/.generated/"
- name: Create base_dir
file:
path: "{{ base_dir }}"
state: directory
recurse: true
- file: path={{base_dir}} state=directory recurse=true
- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
- template: src={{item}} dest="{{base_dir}}/{{item}}"
with_items:
- network.json
- storage.json

View File

@@ -59,7 +59,6 @@ def get_var_as_bool(name, default):
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
@@ -97,10 +96,9 @@ class KubesprayInventory(object):
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_kube_master(list(self.hosts.keys())[
etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
self.set_kube_master(list(self.hosts.keys())[etcd_hosts_count:5])
else:
self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
self.set_kube_master(list(self.hosts.keys())[:2])
self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])

View File

@@ -2,11 +2,9 @@
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don't run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
## Install
```
Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml and making any adjustments as required.
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
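As an illustrative aside, overriding those defaults in the inventory might look like the sketch below. The file path follows the convention quoted above, the address range is a placeholder for your own network, and the variable layout matches the defaults file shown further down in this diff:

```yaml
# inventory/mycluster/group_vars/k8s-cluster/addons.yml (hypothetical override)
metallb:
  # Pool of addresses MetalLB may assign to LoadBalancer services
  ip_range: "192.168.1.240-192.168.1.250"
  protocol: "layer2"
  limits:
    cpu: "100m"
    memory: "100Mi"
```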

View File

@@ -1,12 +1,6 @@
---
metallb:
ip_range: "10.5.0.50-10.5.0.99"
protocol: "layer2"
# additional_address_pools:
# kube_service_pool:
# ip_range: "10.5.1.50-10.5.1.99"
# protocol: "layer2"
# auto_assign: false
limits:
cpu: "100m"
memory: "100Mi"

View File

@@ -8,14 +8,6 @@ data:
config: |
address-pools:
- name: loadbalanced
protocol: {{ metallb.protocol }}
protocol: layer2
addresses:
- {{ metallb.ip_range }}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
- name: {{ pool }}
protocol: {{ metallb.additional_address_pools[pool].protocol }}
addresses:
- {{ metallb.additional_address_pools[pool].ip_range }}
auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}

View File

@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@@ -1,8 +1,6 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

View File

@@ -4,7 +4,6 @@
register: "initial_heketi_state"
changed_when: false
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
- name: "Bootstrap heketi."
when:
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -17,20 +16,15 @@
register: "initial_heketi_pod"
command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
changed_when: false
- name: "Ensure heketi bootstrap pod is up."
assert:
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
- name: Store the initial heketi pod name
set_fact:
- set_fact:
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
- name: "Test heketi topology."
changed_when: false
register: "heketi_topology"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Load heketi topology."
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
include_tasks: "bootstrap/topology.yml"
@@ -48,7 +42,6 @@
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
changed_when: false
register: "heketi_storage_state"
# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
include_tasks: "bootstrap/storage.yml"

View File

@@ -1,19 +1,11 @@
---
- name: Get storage nodes
register: "label_present"
- register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
- name: "Assign storage label"
when: "label_present.stdout_lines|length == 0"
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
- name: Get storage nodes again
register: "label_present"
- register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
- name: Ensure the label has been set
assert:
that: "label_present|length > 0"
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }

View File

@@ -1,24 +1,19 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
become: true
template:
src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json"
template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi"
kube:
name: "GlusterFS"
kubectl: "{{bin_dir}}/kubectl"
filename: "{{ kube_config_dir }}/heketi-deployment.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Ensure heketi is up and running."
changed_when: false
register: "heketi_state"
vars:
heketi_state:
stdout: "{}"
heketi_state: { stdout: "{}" }
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -27,7 +22,5 @@
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
retries: 60
delay: 5
- name: Set the Heketi pod name
set_fact:
- set_fact:
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"

View File

@@ -1,44 +1,31 @@
---
- name: Get clusterrolebindings
register: "clusterrolebinding_state"
- register: "clusterrolebinding_state"
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
changed_when: false
- name: "Kubernetes Apps | Deploy cluster role binding."
when: "clusterrolebinding_state.stdout == \"\""
command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
- name: Get clusterrolebindings again
register: "clusterrolebinding_state"
- register: "clusterrolebinding_state"
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
changed_when: false
- name: Make sure that clusterrolebindings are present now
assert:
- assert:
that: "clusterrolebinding_state.stdout != \"\""
msg: "Cluster role binding is not present."
- name: Get the heketi-config-secret secret
register: "secret_state"
- register: "secret_state"
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
changed_when: false
- name: "Render Heketi secret configuration."
become: true
template:
src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json"
- name: "Deploy Heketi config secret"
when: "secret_state.stdout == \"\""
command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
- name: Get the heketi-config-secret secret again
register: "secret_state"
- register: "secret_state"
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
changed_when: false
- name: Make sure the heketi-config-secret secret exists now
assert:
- assert:
that: "secret_state.stdout != \"\""
msg: "Heketi config secret is not present."

View File

@@ -1,5 +1,4 @@
.terraform
*.tfvars
!sample-inventory\/cluster.tfvars
*.tfstate
*.tfstate.backup

View File

@@ -16,13 +16,14 @@ most modern installs of OpenStack that support the basic services.
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)
### Known incompatible public clouds
- T-Systems / Open Telekom Cloud: requires `wait_until_associated`
## Approach
The terraform configuration inspects variables found in
@@ -69,7 +70,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
@@ -219,7 +220,7 @@ set OS_PROJECT_DOMAIN_NAME=Default
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|Variable | Description |
|---------|-------------|
@@ -245,7 +246,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
#### Terraform state files
@@ -276,7 +276,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```
if you chose to create a bastion host, this script will create
@@ -290,7 +290,7 @@ pick it up automatically.
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -325,30 +325,6 @@ $ ssh-add ~/.ssh/id_rsa
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
#### Metadata variables
The [python script](../terraform.py) that reads the
generated`.tfstate` file to generate a dynamic inventory recognizes
some variables within a "metadata" block, defined in a "resource"
block (example):
```
resource "openstack_compute_instance_v2" "example" {
...
metadata {
ssh_user = "ubuntu"
prefer_ipv6 = true
python_bin = "/usr/bin/python3"
}
...
}
```
As the example shows, these let you define the SSH username for
Ansible, a Python binary which is needed by Ansible if
`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
instance should be preferred over IPv4.
#### Bastion host
Bastion access will be determined by:
@@ -415,11 +391,6 @@ kube_network_plugin: flannel
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
- Set max amount of attached cinder volume per host (default 256)
```
node_volume_attach_limit: 26
```
### Deploy Kubernetes

View File

@@ -3,7 +3,7 @@ provider "openstack" {
}
module "network" {
source = "./modules/network"
source = "modules/network"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
@@ -14,7 +14,7 @@ module "network" {
}
module "ips" {
source = "./modules/ips"
source = "modules/ips"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
@@ -27,7 +27,7 @@ module "ips" {
}
module "compute" {
source = "./modules/compute"
source = "modules/compute"
cluster_name = "${var.cluster_name}"
az_list = "${var.az_list}"
@@ -63,7 +63,6 @@ module "compute" {
supplementary_master_groups = "${var.supplementary_master_groups}"
supplementary_node_groups = "${var.supplementary_node_groups}"
worker_allowed_ports = "${var.worker_allowed_ports}"
wait_for_floatingip = "${var.wait_for_floatingip}"
network_id = "${module.network.router_id}"
}

View File

@@ -22,20 +22,20 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
resource "openstack_networking_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
count = "${var.number_of_bastions != "" ? 1 : 0}"
count = "${var.number_of_bastions ? 1 : 0}"
description = "${var.cluster_name} - Bastion Server"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "bastion" {
count = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
count = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
}
resource "openstack_networking_secgroup_v2" "k8s" {
@@ -99,7 +99,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
"${openstack_networking_secgroup_v2.bastion.name}",
]
metadata = {
@@ -109,7 +109,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -136,7 +136,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -163,7 +163,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -257,7 +257,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -288,14 +288,12 @@ resource "openstack_compute_floatingip_associate_v2" "bastion" {
count = "${var.number_of_bastions}"
floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
count = "${var.number_of_k8s_masters}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
@@ -308,7 +306,6 @@ resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
count = "${var.number_of_k8s_nodes}"
floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {

View File

@@ -82,8 +82,6 @@ variable "k8s_allowed_egress_ips" {
type = "list"
}
variable "wait_for_floatingip" {}
variable "supplementary_master_groups" {
default = ""
}

View File

@@ -1,5 +1,5 @@
resource "null_resource" "dummy_dependency" {
triggers = {
triggers {
dependency_id = "${var.router_id}"
}
}

View File

@@ -1,15 +1,15 @@
output "k8s_master_fips" {
value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
}
output "k8s_master_no_etcd_fips" {
value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_master_no_etcd.*.address}"]
}
output "k8s_node_fips" {
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
}
output "bastion_fips" {
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
}

View File

@@ -14,7 +14,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
count = "${var.use_neutron}"
network_id = "${openstack_networking_network_v2.k8s[count.index].id}"
network_id = "${openstack_networking_network_v2.k8s.id}"
cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
@@ -22,6 +22,6 @@ resource "openstack_networking_subnet_v2" "k8s" {
resource "openstack_networking_router_interface_v2" "k8s" {
count = "${var.use_neutron}"
router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
router_id = "${openstack_networking_router_v2.k8s.id}"
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
}

View File

@@ -125,11 +125,6 @@ variable "floatingip_pool" {
default = "external"
}
variable "wait_for_floatingip" {
description = "Terraform will poll the instance until the floating IP has been associated."
default = "false"
}
variable "external_net" {
description = "uuid of the external/public network"
}

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
#
# Copyright 2015 Cisco Systems, Inc.
#
@@ -20,15 +20,15 @@
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
import random
from functools import wraps
import json
import os
import re
VERSION = '0.4.0pre'
VERSION = '0.3.0pre'
def tfstates(root=None):
@@ -38,58 +38,15 @@ def tfstates(root=None):
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def convert_to_v3_structure(attributes, prefix=''):
""" Convert the attributes from v4 to v3
Receives a dict and return a dictionary """
result = {}
if isinstance(attributes, str):
# In the case when we receive a string (e.g. values for security_groups)
return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes}
for key, value in attributes.items():
if isinstance(value, list):
if len(value):
result['{}{}.#'.format(prefix, key, hash)] = len(value)
for i, v in enumerate(value):
result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
elif isinstance(value, dict):
result['{}{}.%'.format(prefix, key)] = len(value)
for k, v in value.items():
result['{}{}.{}'.format(prefix, key, k)] = v
else:
result['{}{}'.format(prefix, key)] = value
return result
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
tf_version = state['version']
if tf_version == 3:
for module in state['modules']:
name = module['path'][-1]
for key, resource in module['resources'].items():
yield name, key, resource
elif tf_version == 4:
# In version 4 the structure changes so we need to iterate
# each instance inside the resource branch.
for resource in state['resources']:
name = resource['module'].split('.')[-1]
for instance in resource['instances']:
key = "{}.{}".format(resource['type'], resource['name'])
if 'index_key' in instance:
key = "{}.{}".format(key, instance['index_key'])
data = {}
data['type'] = resource['type']
data['provider'] = resource['provider']
data['depends_on'] = instance.get('depends_on', [])
data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
if 'id' in instance['attributes']:
data['primary']['id'] = instance['attributes']['id']
data['primary']['meta'] = instance['attributes'].get('meta',{})
yield name, key, data
else:
raise KeyError('tfstate version %d not supported' % tf_version)
## READ RESOURCES
PARSERS = {}
@@ -152,7 +109,7 @@ def calculate_mantl_vars(func):
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in list(source.items()):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
@@ -170,7 +127,7 @@ def parse_attr_list(source, prefix, sep='.'):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return list(attrs.values())
return attrs.values()
def parse_dict(source, prefix, sep='.'):
@@ -282,12 +239,6 @@ def openstack_host(resource, module_name):
attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
try:
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
attrs.update({
'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
'publicly_routable': True,
})
else:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
@@ -301,9 +252,9 @@ def openstack_host(resource, module_name):
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
device_index = 1
for key, value in list(raw_attrs.items()):
for key, value in raw_attrs.items():
match = re.search("^volume.*.device$", key)
if match:
attrs['disk_volume_device_'+str(device_index)] = value
@@ -321,7 +272,7 @@ def openstack_host(resource, module_name):
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in list(attrs['metadata'].items()))
for item in attrs['metadata'].items())
groups.append('os_region=' + attrs['region'])
# groups specific to Mantl

View File

@@ -32,7 +32,7 @@ The name of the resource group your instances are in, can be retrieved via `azur
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`
#### azure\_subnet\_name
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`
#### azure\_security\_group\_name
The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
@@ -40,14 +40,14 @@ The name of the network security group your instances are in, can be retrieved v
#### azure\_aad\_client\_id + azure\_aad\_client\_secret
These will have to be generated first:
- Create an Azure AD Application with:
`azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
display name, identifier-uri, homepage and the password can be chosen
`azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
The name, identifier-uri, home-page and the password can be chosen
Note the AppId in the output.
- Create Service principal for the application with:
`azure ad sp create --id AppId`
`azure ad sp create --applicationId AppId`
This is the AppId from the last command
- Create the role assignment with:
`azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.

View File

@@ -119,13 +119,13 @@ recommended here:
You need to edit your inventory and add:
* `calico-rr` group with nodes in it. `calico-rr` can be combined with
`kube-node` and/or `kube-master`. `calico-rr` group also must be a child
group of `k8s-cluster` group.
* `calico-rr` group with nodes in it. At the moment it's incompatible with
`kube-node` due to BGP port conflict with `calico-node` container. So you
should not have nodes in both `calico-rr` and `kube-node` groups.
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with standalone route reflectors:
Here's an example of Kubespray inventory with route reflectors:
```
[all]
@@ -154,7 +154,6 @@ node5
[k8s-cluster:children]
kube-node
kube-master
calico-rr
[calico-rr]
rr0
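[calico-rr:vars]
# sketch only: the cluster_id mentioned above, set here as a group var for the
# reflector group; the dotted-quad value is purely an illustrative example
cluster_id=244.0.0.1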

View File

@@ -114,12 +114,10 @@ The only exception is that ``hostNetwork: true`` PODs and non-k8s managed contai
cluster service names.
## Nodelocal DNS cache
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns / core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
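For reference, a minimal sketch of the related group_vars toggles (the variable names and default values below are the ones that appear in `k8s-cluster.yml` later in this changeset):

```
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
```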
Limitations
-----------
@@ -131,7 +129,9 @@ Limitations
* There is
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
for the SkyDNS ``ndots`` param.
for the SkyDNS ``ndots`` param via an
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
add-on, while SkyDNS supports it though.
* the ``searchdomains`` have a limitation of 6 names and 256 chars
length. Due to default ``svc, default.svc`` subdomains, the actual

View File

@@ -3,22 +3,23 @@ Downloading binaries and containers
Kubespray supports several download/upload modes. The default is:
* Each node downloads binaries and container images on its own, which is ``download_run_once: False``.
* Each node downloads binaries and container images on its own, which is
``download_run_once: False``.
* For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
* For system managed containers, like kubelet or etcd, pull policy is ``download_always_pull: False``, which is pull if only the wanted repo and tag/sha256 digest differs from that the host has.
* For system managed containers, like kubelet or etcd, pull policy is
``download_always_pull: False``, which is pull if only the wanted repo and
tag/sha256 digest differs from that the host has.
There is also a "pull once, push many" mode as well:
* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker.
NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
On caching:
* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is a much as is needed for the largest image, which is currently slightly less than 150MB.
* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the remote node to the local cache, or use that cache to pre-provision those nodes. To force the use of the cache, set `download_force_cache` to `True`.
* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting download_keep_remote_cache will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.
* Override the ``download_run_once: True`` to download container images only once
then push to cluster nodes in batches. The default delegate node
for pushing images is the first `kube-master`.
* If your ansible runner node (aka the admin node) has password-less sudo and
docker enabled, you may want to define the ``download_localhost: True``, which
makes that node a delegate for pushing images while running the deployment with
ansible. This may be the case if cluster nodes cannot access each other via ssh
or you want to use local docker images as a cache for multiple clusters.
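Putting these download options together, a minimal group_vars sketch (variable names as used in the download role defaults later in this changeset; all of them are optional overrides) could look like:

```
download_run_once: true
download_localhost: true
download_force_cache: true
download_cache_dir: /tmp/kubespray_cache
download_keep_remote_cache: false
```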
Container images and binary files are described by the vars like ``foo_version``,
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
@@ -28,14 +29,15 @@ Container images may be defined by its repo and tag, for example:
`andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest:
`andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.
Note, the SHA256 digest and the image tag must be both specified and correspond
Note, the sha256 digest and the image tag must be both specified and correspond
to each other. The given example above is represented by the following vars:
```yaml
```
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
dnsmasq_image_repo: andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
```
The full list of available vars may be found in the download's ansible role defaults. Those also allow specifying custom urls and local repositories for binaries and container
The full list of available vars may be found in the download's ansible role defaults.
Those also allow specifying custom urls and local repositories for binaries and container
images as well. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those urls and repos.
@@ -44,7 +46,7 @@ so the hosts can resolve those urls and repos.
In case your servers don't have access to the internet (for example when deploying on premises with security constraints), you'll first have to set up the appropriate proxies/caches/mirrors and/or internal repositories and registries, and then adapt the following variables to fit your environment before deploying:
* At least `foo_image_repo` and `foo_download_url` as described before (i.e. when using proxies for registries and binary repositories, checksums and versions do not necessarily need to be changed).
NOTE: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
* `pyrepo_index` (and optionally `pyrepo_cert`)
* Depending on the `container_manager`
* When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
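Purely as an illustration (the mirror hostnames below are hypothetical; only the variable names come from the points above), such overrides could live in group_vars along these lines:

```
# hypothetical internal mirrors, adjust to your environment
kube_image_repo: "registry.example.local:5000/google-containers"
pyrepo_index: "https://pypi.example.local/simple"
docker_insecure_registries:
  - registry.example.local:5000
```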

View File

@@ -51,27 +51,20 @@ You may want to add worker, master or etcd nodes to your existing cluster. This
Remove nodes
------------
You may want to remove **master**, **worker**, or **etcd** nodes from your
existing cluster. This can be done by re-running the `remove-node.yml`
playbook. First, all specified nodes will be drained, then some
kubernetes services are stopped and some certificates deleted,
and finally the kubectl command is executed to delete these nodes.
This can be combined with the add node function. This is generally helpful
when doing something like autoscaling your clusters. Of course, if a node
is not working, you can remove the node and install it again.
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then some kubernetes services are stopped and some certificates deleted, and finally the kubectl command is executed to delete these nodes. This can be combined with the add node function. This is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.
Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
--private-key=~/.ssh/private_key
Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
```
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
--private-key=~/.ssh/private_key \
--extra-vars "node=nodename,nodename2"
```
If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
to skip the node reset step. If one node is unavailable, but others you wish
to remove are able to connect via SSH, you could set reset_nodes=no as a host
var in inventory.
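A sketch of that host-var form in a YAML inventory (the host name is just an example):

```
all:
  hosts:
    node5:
      reset_nodes: no
```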
Connecting to Kubernetes
------------------------

View File

@@ -1,48 +0,0 @@
Kube-OVN
===========
Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn)
## How to use it
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: kube-ovn
...
```
## Verifying kube-ovn install
Kube-OVN runs OVN and its controller in the `kube-ovn` namespace
* Check the status of kube-ovn pods
```
# From the CLI
kubectl get pod -n kube-ovn
# Output
NAME READY STATUS RESTARTS AGE
kube-ovn-cni-49lsm 1/1 Running 0 2d20h
kube-ovn-cni-9db8f 1/1 Running 0 2d20h
kube-ovn-cni-wftdk 1/1 Running 0 2d20h
kube-ovn-controller-68d7bb48bd-7tnvg 1/1 Running 0 2d21h
ovn-central-6675dbb7d9-d7z8m 1/1 Running 0 4d16h
ovs-ovn-hqn8p 1/1 Running 0 4d16h
ovs-ovn-hvpl8 1/1 Running 0 4d16h
ovs-ovn-r5frh 1/1 Running 0 4d16h
```
* Check the default and node subnet
```
# From the CLI
kubectl get subnet
# Output
NAME PROTOCOL CIDR PRIVATE NAT
join IPv4 100.64.0.0/16 false false
ovn-default IPv4 10.16.0.0/16 false true
```

View File

@@ -1,48 +0,0 @@
Macvlan
===============
How to use it:
-------------
* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: macvlan
...
```
* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
```
all:
hosts:
node1:
ip: 10.2.2.1
access_ip: 10.2.2.1
ansible_host: 10.2.2.1
macvlan_interface: ens5
```
Issues encountered:
-------------
- Service DNS
reply from unexpected source:
add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
- Disable nodelocaldns
The nodelocal dns IP is not reachable.
Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
```
enable_nodelocaldns: false
```

View File

@@ -13,7 +13,6 @@ Kubespray's roadmap
- [ ] GCE
- [x] AWS (contrib/terraform/aws)
- [x] Openstack (contrib/terraform/openstack)
- [x] Packet
- [ ] Digital Ocean
- [ ] Azure
- [ ] On AWS autoscaling, multi AZ
@@ -24,11 +23,11 @@ Kubespray's roadmap
https://github.com/kubernetes/kubernetes/issues/18112)
### Tests
- [x] Run kubernetes e2e tests
- [ ] Run kubernetes e2e tests
- [ ] Test idempotency on single OS but for all network plugins/container engines
- [ ] single test on AWS per day
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
- [x] Reorganize CI test vars into group var files
- [ ] Reorganize CI test vars into group var files
### Lifecycle
- [ ] Upgrade granularity: select components to upgrade and skip others
@@ -43,10 +42,23 @@ Kubespray's roadmap
- Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
### Addons (helm or native ansible)
- [x] Helm
- [x] Ingress-nginx
- [x] kubernetes-dashboard
Include optionals deployments to init the cluster:
##### Monitoring
- Heapster / Grafana ....
- **Prometheus**
##### Others
##### Dashboards:
- kubernetes-dashboard
- Fabric8
- Tectonic
- Cockpit
##### Paas like
- Openshift Origin
- Openstack
- Deis Workflow
### Others
- Organize and update documentation (split in categories)

View File

@@ -1,129 +1,69 @@
Introduction
============
Vagrant Install
=================
Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (the latter is untested), you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install kubernetes on them. Once they are completed you can connect to any of them by running `vagrant ssh k8s-[1..3]`.
Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
with vmware, but is untested) you should be able to launch a 3 node
Kubernetes cluster by simply running `$ vagrant up`.<br />
To give an estimate of the expected duration of a provisioning run: on a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time.
This will spin up 3 VMs and install kubernetes on them. Once they are
completed you can connect to any of them by running <br />
`$ vagrant ssh k8s-0[1..3]`.
For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tmpfs devices that are mounted are mostly empty, and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well.
```
$ vagrant up
Bringing machine 'k8s-01' up with 'virtualbox' provider...
Bringing machine 'k8s-02' up with 'virtualbox' provider...
Bringing machine 'k8s-03' up with 'virtualbox' provider...
==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
...
...
k8s-03: Running ansible-playbook...
PLAY [k8s-cluster] *************************************************************
TASK [setup] *******************************************************************
ok: [k8s-03]
ok: [k8s-01]
ok: [k8s-02]
...
...
PLAY RECAP *********************************************************************
k8s-01 : ok=157 changed=66 unreachable=0 failed=0
k8s-02 : ok=137 changed=59 unreachable=0 failed=0
k8s-03 : ok=86 changed=51 unreachable=0 failed=0
$ vagrant ssh k8s-01
vagrant@k8s-01:~$ kubectl get nodes
NAME STATUS AGE
k8s-01 Ready 45s
k8s-02 Ready 45s
k8s-03 Ready 45s
```
Customize Vagrant
=================
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a `config.rb` file in it. An example of how to configure this file is given below.
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
or through an override file.
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
e.g.:
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
Use alternative OS for Vagrant
==============================
By default, Vagrant uses Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster.
By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
operating system for your local cluster.
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
echo '$os = "coreos-stable"' >> vagrant/config.rb
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
File and image caching
======================
Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes; as a bonus, it also caches all downloads locally and re-uses them on the next provisioning run. For more information on download settings see [download documentation](docs/downloads.md).
Example use of Vagrant
======================
The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repository and run it by executing `source <name_of_the_file>`.
```
# use virtualenv to install all python requirements
VENVDIR=venv
virtualenv --python=/usr/bin/python3.7 $VENVDIR
source $VENVDIR/bin/activate
pip install -r requirements.txt
# prepare an inventory to test with
INV=inventory/my_lab
rm -rf ${INV}.bak &> /dev/null
mv ${INV} ${INV}.bak &> /dev/null
cp -a inventory/sample ${INV}
rm -f ${INV}/hosts.ini
# customize the vagrant environment
mkdir vagrant
cat << EOF > vagrant/config.rb
\$instance_name_prefix = "kub"
\$vm_cpus = 1
\$num_instances = 3
\$os = "centos-bento"
\$subnet = "10.0.20"
\$network_plugin = "flannel"
\$inventory = "$INV"
\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
EOF
# make the rpm cache
mkdir -p temp/docker_rpms
vagrant up
# make a copy of the downloaded docker rpm, to speed up the next provisioning run
scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/
# copy kubectl access configuration in place
mkdir $HOME/.kube/ &> /dev/null
ln -s $INV/artifacts/admin.conf $HOME/.kube/config
# make the kubectl binary available
sudo ln -s $INV/artifacts/kubectl /usr/local/bin/kubectl
#or
export PATH=$PATH:$INV/artifacts
```
If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible:
```
ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
```
If all went well, you can check that everything is working as expected:
```
kubectl get nodes
```
The output should look like this:
```
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
kub-1 Ready master 32m v1.14.1
kub-2 Ready master 31m v1.14.1
kub-3 Ready <none> 31m v1.14.1
```
Another nice test is the following:
```
kubectl get po --all-namespaces -o wide
```
Which should yield something like the following:
```
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-97c4b444f-9wm86 1/1 Running 0 31m 10.233.66.2 kub-3 <none> <none>
kube-system coredns-97c4b444f-g7hqx 0/1 Pending 0 30m <none> <none> <none> <none>
kube-system dns-autoscaler-5fc5fdbf6-5c48k 1/1 Running 0 31m 10.233.66.3 kub-3 <none> <none>
kube-system kube-apiserver-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-apiserver-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kube-controller-manager-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-controller-manager-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kube-flannel-8tgcn 2/2 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system kube-flannel-b2hgt 2/2 Running 0 31m 10.0.20.101 kub-1 <none> <none>
kube-system kube-flannel-zx4bc 2/2 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system kube-proxy-4bjdn 1/1 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system kube-proxy-l5tt5 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system kube-proxy-x59q8 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none>
kube-system kube-scheduler-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-scheduler-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kubernetes-dashboard-6c7466966c-jqz42 1/1 Running 0 31m 10.233.66.4 kub-3 <none> <none>
kube-system nginx-proxy-kub-3 1/1 Running 0 32m 10.0.20.103 kub-3 <none> <none>
kube-system nodelocaldns-2x7vh 1/1 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system nodelocaldns-fpvnz 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system nodelocaldns-h2f42 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none>
```
Create clusteradmin rbac and get the login token for the dashboard:
```
kubectl create -f contrib/misc/clusteradmin-rbac.yml
kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$'
```
Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login).

View File

@@ -57,16 +57,10 @@ following default cluster parameters:
10.233.0.0/18). Must not overlap with kube_pods_subnet
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
overlap with kube_service_addresses.
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remainin
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
on the CoreDNS service.
* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
(default is k8s_external.local)
* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin.
on the CoreDNS service.
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
@@ -104,7 +98,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
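For instance, a hedged group_vars sketch using the options above (the registry value is the example from the *docker_options* bullet, and the plugin name is purely illustrative):

```
docker_options: "--insecure-registry=myregistry.mydomain:5000"
# hypothetical plugin, shown only to illustrate the list format
docker_plugins:
  - "vieux/sshfs"
```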
@@ -124,13 +117,11 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
from the kube-apiserver when the certificate expiration approaches.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can be defined either as a dict or a comma-separated labels string:
*node_labels* must be defined as a dict:
```
node_labels:
label1_name: label1_value
label2_name: label2_value
node_labels: "label1_name=label1_value,label2_name=label2_value"
```
* *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter.
For example, taints can be set in the inventory as variables or more widely in group_vars.
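A hedged sketch (the taint follows the kubelet `key=value:effect` form passed to `--register-with-taints`; the key and value are only examples):

```
node_taints:
  - "node.example.com/external=true:NoSchedule"
```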

View File

@@ -27,6 +27,12 @@
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:

View File

@@ -12,4 +12,3 @@ node1
[k8s-cluster:children]
kube-node
kube-master
calico-rr

View File

@@ -2,9 +2,6 @@
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd
## Experimental kubeadm etcd deployment mode. Available only for new deployment
etcd_kubeadm_enabled: false
## Directory where the binaries will be installed
bin_dir: /usr/local/bin

View File

@@ -35,8 +35,6 @@ local_volume_provisioner_enabled: false
# local-storage:
# host_dir: /mnt/disks
# mount_dir: /mnt/disks
# volume_mode: Filesystem
# fs_type: ext4
# fast-disks:
# host_dir: /mnt/fast-disks
# mount_dir: /mnt/fast-disks
@@ -80,9 +78,8 @@ rbd_provisioner_enabled: false
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
# beta.kubernetes.io/os: "linux"
# beta.kubernetes.io/os: "linux": ""
# ingress_nginx_tolerations:
# - key: "node-role.kubernetes.io/master"
# operator: "Equal"
@@ -97,7 +94,7 @@ ingress_publish_status_address: ""
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/coredns:53"
# 53: "kube-system/kube-dns:53"
# Cert manager deployment
cert_manager_enabled: false

View File

@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.15.11
kube_version: v1.14.6
# kubernetes image repo define
kube_image_repo: "gcr.io/google-containers"
@@ -134,12 +134,6 @@ dns_mode: coredns
# Enable nodelocal dns cache
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
@@ -151,7 +145,7 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
dns_domain: "{{ cluster_name }}"
## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## docker for docker and crio for cri-o.
container_manager: docker
## Settings for containerized control plane (etcd/kubelet/secrets)
@@ -193,18 +187,6 @@ podsecuritypolicy_enabled: false
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512M
# system_cpu_reserved: 500m
## Reservation for master hosts
# system_master_memory_reserved: 256M
# system_master_cpu_reserved: 250m
# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

View File

@@ -1,6 +0,0 @@
---
# private interface, on a l2-network
macvlan_interface: "eth1"
# Enable nat in default gateway network interface
enable_nat_default_gateway: true

View File

@@ -28,9 +28,6 @@
# node5
# node6
[calico-rr]
[k8s-cluster:children]
kube-master
kube-node
calico-rr

View File

@@ -1,7 +1,6 @@
---
- hosts: localhost
become: no
gather_facts: no
tasks:
- name: "Check ansible version >=2.7.8"
assert:
@@ -13,8 +12,12 @@
vars:
ansible_connection: local
- hosts: all
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
gather_facts: no
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -28,20 +31,14 @@
when: delete_nodes_confirmation != "yes"
- hosts: kube-master
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: remove-node/pre-remove, tags: pre-remove }
- hosts: "{{ node | default('kube-node') }}"
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: reset, tags: reset, when: reset_nodes|default(True) }
- { role: reset, tags: reset }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
gather_facts: no
- hosts: kube-master
roles:
- { role: kubespray-defaults }
- { role: remove-node/post-remove, tags: post-remove }

View File

@@ -1,4 +1,4 @@
ansible==2.7.12
ansible==2.7.8
jinja2==2.10.1
netaddr==0.7.19
pbr==5.2.0

View File

@@ -1,13 +1,11 @@
---
- name: set bastion host IP
set_fact:
- set_fact:
bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
delegate_to: localhost
# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
- name: Store the current ansible_user in the real_user fact
set_fact:
- set_fact:
real_user: "{{ ansible_user }}"
- name: create ssh bastion conf

View File

@@ -15,4 +15,4 @@ Host {{ bastion_ip }}
ControlPersist 5m
Host {{ vars['hosts'] }}
ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
ProxyCommand ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}

View File

@@ -23,6 +23,7 @@ Variables are listed with their default values, if applicable.
* `http_proxy`/`https_proxy`
The role will configure the package manager (if applicable) to download packages via a proxy.
This is currently implemented for CentOS/RHEL (`http_proxy` only) as well as Debian and Ubuntu (both `http_proxy` and `https_proxy` are respected)
* `override_system_hostname: true`
The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).
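For example, a minimal sketch of these variables in group_vars (the proxy URL is hypothetical):

```
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
override_system_hostname: true
```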

View File

@@ -19,9 +19,6 @@
- name: Run bootstrap.sh
script: bootstrap.sh
become: true
environment:
http_proxy: "{{ http_proxy | default('') }}"
https_proxy: "{{ https_proxy | default('') }}"
when:
- need_bootstrap.rc != 0

View File

@@ -12,8 +12,8 @@
tags:
- facts
- name: Check http::proxy in apt configuration files
raw: apt-config dump | grep -qsi 'Acquire::http::proxy'
- name: Check http::proxy in /etc/apt/apt.conf
raw: grep -qsi 'Acquire::http::proxy' /etc/apt/apt.conf
register: need_http_proxy
failed_when: false
changed_when: false
@@ -31,8 +31,8 @@
- http_proxy is defined
- need_http_proxy.rc != 0
- name: Check https::proxy in apt configuration files
raw: apt-config dump | grep -qsi 'Acquire::https::proxy'
- name: Check https::proxy in /etc/apt/apt.conf
raw: grep -qsi 'Acquire::https::proxy' /etc/apt/apt.conf
register: need_https_proxy
failed_when: false
changed_when: false

View File

@@ -25,26 +25,6 @@
tags:
- facts
- name: Check if a proxy is set in /etc/dnf/dnf.conf
raw: grep -qs 'proxy=' /etc/dnf/dnf.conf
register: need_http_proxy
failed_when: false
changed_when: false
# This command should always run, even in check mode
check_mode: false
environment: {}
when:
- http_proxy is defined
- name: Add http_proxy to /etc/dnf/dnf.conf if http_proxy is defined
raw: echo 'proxy={{ http_proxy }}' >> /etc/dnf/dnf.conf
become: true
environment: {}
when:
- http_proxy is defined
- need_http_proxy.rc != 0
- not is_atomic
# Fedora's policy as of Fedora 30 is to still install python2 as /usr/bin/python
# See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3 for the current status
- name: Install python on fedora

View File

@@ -1,33 +1,6 @@
---
# OpenSUSE ships with Python installed
- name: Set the http_proxy in /etc/sysconfig/proxy
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^HTTP_PROXY='
line: 'HTTP_PROXY="{{ http_proxy }}"'
become: true
when:
- http_proxy is defined
- name: Set the https_proxy in /etc/sysconfig/proxy
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^HTTPS_PROXY='
line: 'HTTPS_PROXY="{{ https_proxy }}"'
become: true
when:
- https_proxy is defined
- name: Enable proxies
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^PROXY_ENABLED='
line: 'PROXY_ENABLED="yes"'
become: true
when:
- http_proxy is defined or https_proxy is defined
# Without this package, the get_url module fails when trying to handle https
- name: Install python-cryptography
zypper:

View File

@@ -1,21 +0,0 @@
---
- name: Download Oracle Linux public yum repo
get_url:
url: https://yum.oracle.com/public-yum-ol7.repo
dest: /etc/yum.repos.d/public-yum-ol7.repo
- name: Enable Oracle Linux repo
ini_file:
dest: /etc/yum.repos.d/public-yum-ol7.repo
section: "{{ item }}"
option: enabled
value: "1"
with_items:
- ol7_latest
- ol7_addons
- ol7_developer_EPEL
- name: Install packages requirements for bootstrap
yum:
name: container-selinux
state: present

View File

@@ -25,9 +25,6 @@
- include_tasks: bootstrap-opensuse.yml
when: '"openSUSE" in os_release.stdout'
- include_tasks: bootstrap-oracle.yml
when: '"Oracle" in os_release.stdout'
- name: Create remote_tmp for it is used by another module
file:
path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}"
@@ -72,11 +69,3 @@
- ceph-common
state: present
when: rbd_provisioner_enabled|default(false)
- name: Ensure bash_completion.d folder exists
file:
name: /etc/bash_completion.d/
state: directory
owner: root
group: root
mode: 0755

View File

@@ -1,44 +0,0 @@
---
kubelet_cgroup_driver: systemd
containerd_config:
grpc:
max_recv_message_size: 16777216
max_send_message_size: 16777216
debug:
level: ""
registries:
"docker.io": "https://registry-1.docker.io"
max_container_log_line_size: -1
containerd_version: '1.2.6'
containerd_package: 'containerd.io'
containerd_cfg_dir: /etc/containerd
# Path to runc binary
runc_binary: /usr/sbin/runc
yum_repo_dir: /etc/yum.repos.d
yum_conf: /etc/yum.conf
containerd_yum_conf: /etc/yum_containerd.conf
# Optional values for containerd apt repo
containerd_package_info:
pkgs:
containerd_repo_key_info:
repo_keys:
containerd_repo_info:
repos:
extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"
# Ubuntu docker-ce repo
containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
containerd_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
containerd_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
containerd_ubuntu_repo_component: 'stable'

View File

@@ -1,20 +0,0 @@
---
- name: restart containerd
command: /bin/true
notify:
- Containerd | restart containerd
- Containerd | wait for containerd
- name: Containerd | restart containerd
systemd:
name: containerd
state: restarted
enabled: yes
daemon-reload: yes
- name: Containerd | wait for containerd
command: "{{ containerd_bin_dir }}/ctr images ls -q"
register: containerd_ready
retries: 8
delay: 4
until: containerd_ready.rc == 0

View File

@@ -1,85 +0,0 @@
---
- name: ensure containerd repository public key is installed
action: "{{ containerd_repo_key_info.pkg_key }}"
args:
id: "{{ item }}"
url: "{{ containerd_repo_key_info.url }}"
state: present
register: keyserver_task_result
until: keyserver_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
with_items: "{{ containerd_repo_key_info.repo_keys }}"
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: ensure containerd repository is enabled
action: "{{ containerd_repo_info.pkg_repo }}"
args:
repo: "{{ item }}"
state: present
with_items: "{{ containerd_repo_info.repos }}"
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- containerd_repo_info.repos|length > 0
# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_containerd.j2"
dest: "/etc/apt/preferences.d/containerd"
owner: "root"
mode: 0644
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: Configure containerd repository on Fedora
template:
src: "fedora_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
when: ansible_distribution == "Fedora" and not is_atomic
- name: Configure containerd repository on RedHat/CentOS
template:
src: "rh_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: check if container-selinux is available
yum:
list: "container-selinux"
register: yum_result
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
yum_repository:
name: extras
description: "CentOS-7 - Extras"
state: present
baseurl: "{{ extras_rh_repo_base_url }}"
file: "extras"
gpgcheck: yes
gpgkey: "{{ extras_rh_repo_gpgkey }}"
keepcache: "{{ containerd_rpm_keepcache | default('1') }}"
proxy: " {{ http_proxy | default('_none_') }}"
when:
- ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- yum_result.results | length == 0
- name: Copy yum.conf for editing
copy:
src: "{{ yum_conf }}"
dest: "{{ containerd_yum_conf }}"
remote_src: yes
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Edit copy of yum.conf to set obsoletes=0
lineinfile:
path: "{{ containerd_yum_conf }}"
state: present
regexp: '^obsoletes='
line: 'obsoletes=0'
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

View File

@@ -1,27 +0,0 @@
---
- name: crictl | Download crictl
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.crictl) }}"
- name: Install crictl config
template:
src: ../templates/crictl.yaml.j2
dest: /etc/crictl.yaml
owner: bin
mode: 0644
- name: Copy crictl binary from download dir
synchronize:
src: "{{ local_release_dir }}/crictl"
dest: "{{ bin_dir }}/crictl"
compress: no
perms: yes
owner: no
group: no
delegate_to: "{{ inventory_hostname }}"
- name: Install crictl completion
shell: "{{ bin_dir }}/crictl completion >/etc/bash_completion.d/crictl"
ignore_errors: True
when: ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]

View File

@@ -1,95 +0,0 @@
---
- name: Fail containerd setup if distribution is not supported
fail:
msg: "{{ ansible_distribution }} is not supported by containerd."
when:
- not ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
tags:
- facts
- include_tasks: containerd_repo.yml
- name: ensure containerd config directory
file:
dest: "{{ containerd_cfg_dir }}"
state: directory
mode: 0755
owner: root
group: root
- name: Copy containerd config file
template:
src: config.toml.j2
dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root"
mode: 0644
notify: restart containerd
# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_containerd.j2"
dest: "/etc/apt/preferences.d/containerd"
owner: "root"
mode: 0644
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: ensure containerd packages are installed
action: "{{ containerd_package_info.pkg_mgr }}"
args:
pkg: "{{ item.name }}"
force: "{{ item.force | default(omit) }}"
conf_file: "{{ item.yum_conf | default(omit) }}"
state: present
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
register: containerd_task_result
until: containerd_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
with_items: "{{ containerd_package_info.pkgs }}"
notify: restart containerd
when:
- not is_atomic
- containerd_package_info.pkgs|length > 0
ignore_errors: true
- name: Check if runc is installed
stat:
path: "{{ runc_binary }}"
register: runc_stat
- name: Install runc package if necessary
action: "{{ containerd_package_info.pkg_mgr }}"
args:
pkg: runc
state: present
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
register: runc_task_result
until: runc_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
notify: restart containerd
when:
- not is_atomic
- not runc_stat.stat.exists
- include_tasks: crictl.yml

View File

@@ -1,3 +0,0 @@
Package: {{ containerd_package }}
Pin: version {{ containerd_version }}*
Pin-Priority: 1001

View File

@@ -1,40 +0,0 @@
# Kubernetes doesn't use containerd restart manager.
disabled_plugins = ["restart"]
[debug]
level = "{{ containerd_config.debug.level | default("") }}"
{% if 'grpc' in containerd_config %}
[grpc]
{% for param, value in containerd_config.grpc.items() %}
{{ param }} = {{ value }}
{% endfor %}
{% endif %}
[plugins.linux]
shim = "/usr/bin/containerd-shim"
runtime = "{{ runc_binary }}"
[plugins.cri]
stream_server_address = "127.0.0.1"
max_container_log_line_size = {{ containerd_config.max_container_log_line_size }}
sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
{% if 'registries' in containerd_config %}
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
{% for registry, addr in containerd_config.registries.items() %}
[plugins.cri.registry.mirrors."{{ registry }}"]
endpoint = ["{{ addr }}"]
{% endfor %}
{% endif %}

View File

@@ -1,4 +0,0 @@
runtime-endpoint: unix://{{ cri_socket }}
image-endpoint: unix://{{ cri_socket }}
timeout: 30
debug: false

View File

@@ -1,9 +0,0 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}

View File

@@ -1,9 +0,0 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}

View File

@@ -1,28 +0,0 @@
---
containerd_versioned_pkg:
'latest': "{{ containerd_package }}"
'1.2.4': "{{ containerd_package }}-1.2.4-3.1.el7"
'1.2.5': "{{ containerd_package }}-1.2.5-3.1.el7"
'1.2.6': "{{ containerd_package }}-1.2.6-3.3.el7"
'stable': "{{ containerd_package }}-1.2.6-3.3.el7"
'edge': "{{ containerd_package }}-1.2.6-3.3.el7"
containerd_package_info:
pkg_mgr: yum
pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
containerd_pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
yum_conf: "{{ containerd_yum_conf }}"
containerd_repo_key_info:
pkg_key: ''
repo_keys: []
containerd_repo_info:
pkg_repo: ''
repos: []
runc_binary: /bin/runc

View File

@@ -1,17 +0,0 @@
---
# docker-ce containerd.io does not contain daemon
containerd_package: containerd
containerd_package_info:
pkg_mgr: zypper
pkgs:
- name: "{{ containerd_package }}"
state: latest
containerd_repo_key_info:
pkg_key: ''
repo_keys: []
containerd_repo_info:
pkg_repo: ''
repos: []

View File

@@ -1,31 +0,0 @@
---
containerd_versioned_pkg:
'latest': "{{ containerd_package }}"
'1.2.4': "{{ containerd_package }}=1.2.4-1"
'1.2.5': "{{ containerd_package }}=1.2.5-1"
'1.2.6': "{{ containerd_package }}=1.2.6-3"
'stable': "{{ containerd_package }}=1.2.4-1"
'edge': "{{ containerd_package }}=1.2.4-1"
containerd_package_info:
pkg_mgr: apt
pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
force: false
containerd_repo_key_info:
pkg_key: apt_key
url: '{{ containerd_ubuntu_repo_gpgkey }}'
repo_keys:
- '{{ containerd_ubuntu_repo_repokey }}'
containerd_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ containerd_ubuntu_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ containerd_ubuntu_repo_component }}
runc_binary: /usr/bin/runc

View File

@@ -1,2 +1,2 @@
---
crio_rhel_repo_base_url: 'http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin311/'
crio_rhel_repo_base_url: 'https://cbs.centos.org/repos/paas7-openshift-origin311-candidate/x86_64/os/'

View File

@@ -22,13 +22,7 @@
description: OpenShift Origin Repo
baseurl: "{{ crio_rhel_repo_base_url }}"
gpgcheck: no
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
- name: Add CRI-O PPA
apt_repository:
repo: ppa:projectatomic/ppa
state: present
when: ansible_distribution in ["Ubuntu"]
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Make sure needed folders exist in the system
with_items:

View File

@@ -64,7 +64,7 @@ file_locking = true
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
{% if ansible_os_family == "ClearLinux" or ansible_os_family == "RedHat" or ansible_distribution == "Ubuntu" %}
{% if ansible_os_family == "ClearLinux" %}
runtime = "/usr/bin/runc"
{% else %}
runtime = "/usr/sbin/runc"
@@ -96,7 +96,7 @@ default_workload_trust = "trusted"
no_pivot = false
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "{{ crio_conmon }}"
conmon = "/usr/libexec/crio/conmon"
# conmon_env is the environment variable list for conmon process,
# used for passing necessary environment variable to conmon or runtime.

View File

@@ -3,4 +3,3 @@ crio_packages:
- containers-basic
crio_service: crio
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -4,4 +4,3 @@ crio_packages:
- cri-tools
crio_service: cri-o
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -5,4 +5,3 @@ crio_packages:
- oci-systemd-hook
crio_service: crio
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -1,6 +0,0 @@
---
crio_packages:
- "cri-o-{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
crio_service: crio
crio_conmon: /usr/lib/crio/bin/conmon

View File

@@ -38,6 +38,11 @@ docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
# Debian docker-ce repo
docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
# dockerproject repo
dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
docker_bin_dir: "/usr/bin"
# CentOS/RedHat Extras repo
extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"

View File

@@ -1,8 +0,0 @@
---
- name: Install Docker plugin
command: docker plugin install --grant-all-permissions {{ docker_plugin | quote }}
when: docker_plugin is defined
register: docker_plugin_status
failed_when:
- docker_plugin_status.failed
- '"already exists" not in docker_plugin_status.stderr'

View File

@@ -4,8 +4,7 @@
path: /run/ostree-booted
register: ostree
- name: set is_atomic
set_fact:
- set_fact:
is_atomic: "{{ ostree.stat.exists }}"
- name: gather os specific variables
@@ -27,6 +26,9 @@
tags:
- facts
# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
# openSUSE version so we can't use it. The only alternative is to use the docker
# packages from the distribution repositories.
- name: Warn about Docker version on SUSE
debug:
msg: "SUSE distributions always install Docker from the distro repos"
@@ -101,16 +103,11 @@
dest: "{{ yum_repo_dir }}/docker.repo"
when: ansible_distribution == "Fedora" and not is_atomic
- name: Configure docker repository on RedHat/CentOS/Oracle Linux
yum_repository:
name: docker-ce
baseurl: "{{ docker_rh_repo_base_url }}"
description: "Docker CE Stable - $basearch"
gpgcheck: yes
gpgkey: "{{ docker_rh_repo_gpgkey }}"
keepcache: "{{ docker_rpm_keepcache | default('1') }}"
proxy: " {{ http_proxy | default('_none_') }}"
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
- name: Configure docker repository on RedHat/CentOS
template:
src: "rh_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker.repo"
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: check if container-selinux is available
yum:
@@ -138,7 +135,7 @@
src: "{{ yum_conf }}"
dest: "{{ docker_yum_conf }}"
remote_src: yes
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Edit copy of yum.conf to set obsoletes=0
lineinfile:
@@ -146,7 +143,7 @@
state: present
regexp: '^obsoletes='
line: 'obsoletes=0'
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: ensure docker packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
@@ -194,11 +191,13 @@
- ansible_distribution == 'Ubuntu'
# This is required to ensure any apt upgrade will not break kubernetes
- name: Tell Debian hosts not to change the docker version with apt upgrade
dpkg_selections:
name: docker-ce
selection: hold
when: ansible_os_family in ["Debian"]
- name: Set docker pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_docker.j2"
dest: "/etc/apt/preferences.d/docker"
owner: "root"
mode: 0644
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "ClearLinux", "RedHat", "Suse"] or is_atomic)
- name: ensure docker started, remove our config if docker start failed and try again
block:
@@ -237,12 +236,6 @@
resolvconf_mode == 'docker_dns' and
installed_docker_version.stdout is version('1.12', '<')
# Install each plugin using a looped include to make error handling in the included task simpler.
- include_tasks: docker_plugin.yml
loop: "{{ docker_plugins }}"
loop_control:
loop_var: docker_plugin
- name: Set docker systemd config
import_tasks: systemd.yml

View File

@@ -1,12 +1,4 @@
---
- name: Remove legacy docker repo file
file:
path: "{{ yum_repo_dir }}/docker.repo"
state: absent
when:
- ansible_distribution in ["CentOS","RedHat","OracleLinux"]
- not is_atomic
- name: Ensure old versions of Docker are not installed. | Debian
apt:
name: '{{ docker_remove_packages_apt }}'

View File

@@ -0,0 +1,3 @@
Package: docker-ce
Pin: version {{ docker_version }}.*
Pin-Priority: 1001

View File

@@ -1,15 +1,5 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }} \
{% for i in docker_insecure_registries %}--insecure-registry={{ i }} {% endfor %} \
{% for i in docker_registry_mirrors %}--registry-mirror={{ i }} {% endfor %} \
{% if docker_version != "latest" and docker_version is version('17.05', '<') %}--graph={% else %}--data-root={% endif %}{{ docker_daemon_graph }} \
{% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %} \
{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %} \
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd \
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false \
{% endif %}"
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}

View File

@@ -0,0 +1,17 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
[docker-engine]
name=Docker-Engine Repository
baseurl={{ dockerproject_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ dockerproject_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}

View File

@@ -2,6 +2,7 @@
docker_kernel_min_version: '3.10'
# https://download.docker.com/linux/debian/
# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
docker_versioned_pkg:
'latest': docker-ce
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
@@ -37,7 +38,7 @@ docker_repo_info:
dockerproject_repo_key_info:
pkg_key: apt_key
url: '{{ docker_debian_repo_gpgkey }}'
url: '{{ dockerproject_apt_repo_gpgkey }}'
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
@@ -45,6 +46,6 @@ dockerproject_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ docker_debian_repo_base_url }}
deb {{ dockerproject_apt_repo_base_url }}
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main

View File

@@ -3,6 +3,7 @@ docker_kernel_min_version: '0'
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
# https://yum.dockerproject.org/repo/main/centos/7
# or do 'yum --showduplicates list docker-engine'
docker_versioned_pkg:
'latest': docker-ce

View File

@@ -37,7 +37,7 @@ docker_repo_info:
dockerproject_repo_key_info:
pkg_key: apt_key
url: '{{ docker_debian_repo_gpgkey }}'
url: '{{ dockerproject_apt_repo_gpgkey }}'
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
@@ -45,6 +45,6 @@ dockerproject_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ docker_debian_repo_base_url }}
deb {{ dockerproject_apt_repo_base_url }}
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main

View File

@@ -33,7 +33,7 @@ docker_repo_info:
dockerproject_repo_key_info:
pkg_key: apt_key
url: '{{ docker_debian_repo_gpgkey }}'
url: '{{ dockerproject_apt_repo_gpgkey }}'
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
@@ -41,6 +41,6 @@ dockerproject_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ docker_debian_repo_base_url }}
deb {{ dockerproject_apt_repo_base_url }}
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main

View File

@@ -7,13 +7,6 @@ dependencies:
- container-engine
- crio
- role: container-engine/containerd
when:
- container_manager == 'containerd'
tags:
- container-engine
- containerd
- role: container-engine/docker
when:
- container_manager == 'docker'

View File

@@ -1,23 +1,9 @@
---
local_release_dir: /tmp/releases
download_cache_dir: /tmp/kubespray_cache
# do not delete remote cache files after using them
# NOTE: Setting this parameter to TRUE is only really useful when developing kubespray
download_keep_remote_cache: false
# Only useful when download_run_once is false: locally cached files and images are
# uploaded to kubernetes nodes. Also, images downloaded on those nodes are copied
# back to the ansible runner's cache, if they are not yet present.
download_force_cache: false
# Used to only evaluate vars from download role
skip_downloads: false
# Optionally skip kubeadm images download
skip_kubeadm_images: false
kubeadm_images: {}
# if this is set to true, files will only be downloaded once. Doesn't work
# on Container Linux by CoreOS unless download_localhost is true and localhost
# is running another OS type. Default compress level is 1 (fastest).
@@ -49,210 +35,150 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kub
image_arch: "{{host_architecture | default('amd64')}}"
# Versions
kube_version: v1.15.11
kube_version: v1.14.6
kubeadm_version: "{{ kube_version }}"
etcd_version: v3.3.10
etcd_version: v3.2.26
# kubernetes image repo define
kube_image_repo: "gcr.io/google-containers"
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v3.7.3"
calico_ctl_version: "v3.7.3"
calico_cni_version: "v3.7.3"
calico_policy_version: "v3.7.3"
calico_version: "v3.4.0"
calico_ctl_version: "v3.4.4"
calico_cni_version: "v3.4.0"
calico_policy_version: "v3.4.0"
calico_rr_version: "v0.6.1"
calico_typha_version: "v3.7.3"
calico_typha_version: "v3.4.4"
flannel_version: "v0.11.0"
flannel_cni_version: "v0.3.0"
cni_version: "v0.8.1"
cni_version: "v0.6.0"
weave_version: 2.5.2
weave_version: 2.5.1
pod_infra_version: 3.1
contiv_version: 1.2.1
cilium_version: "v1.5.5"
kube_ovn_version: "v0.6.0"
cilium_version: "v1.3.0"
kube_router_version: "v0.2.5"
multus_version: "v3.2.1"
crictl_version: "v1.15.0"
multus_version: "v3.1.autoconf"
# Download URLs
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/hyperkube"
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-{{ image_arch }}-{{ cni_version }}.tgz"
calicoctl_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
crictl_checksums:
arm:
v1.15.0: f31f8c3b4791608a48d030d1aa1a694a73849ae057b23a90ce4ef17e5afde9e8
v1.14.0: 9910cecfd6558239ba015323066c7233d8371af359b9ddd0b2a35d5223bcf945
v1.13.0: 2e478ebed85f9d70d49fd8f1d1089c8fba6e37d3461aeef91813f1ab0f0df586
arm64:
v1.15.0: 785c3da7e058f6fd00b0a48de24b9199eb6bae940d13f509c44ea6dd7ad9ffcd
v1.14.0: f76b3d00a272c8d210e9a45f77d07d3770bee310d99c4fd9a72d6f55278882e5
v1.13.0: 68949c0cb5a37e7604c145d189cf1e109c08c93d9c710ba663db026b9c6f2746
amd64:
v1.15.0: c3b71be1f363e16078b51334967348aab4f72f46ef64a61fe7754e029779d45a
v1.14.0: 483c90a9fe679590df4332ba807991c49232e8cd326c307c575ecef7fe22327b
v1.13.0: 9bdbea7a2b382494aff2ff014da328a042c5aba9096a7772e57fdf487e5a1d51
# Checksums
hyperkube_checksums:
arm:
v1.15.11: a7fce8d8b7d3e33c3c68100e21d75fd97a079fcc0ec5ade44996e47dd5c26d35
v1.15.10: b8d4e46376a9d42d87f16b91d5900bf1aba4a2bf9bc943a9ab628cf06e382117
v1.15.9: 34c7d3076b4c5a1b778a1f61dcfe55edf7bf9ae096baabcd2b0619610cd43ace
v1.15.8: 45b2025b2455110bc0b4b54addfd4b27de855763e6b2b33b8a90da95c7187fd9
v1.15.7: 25efadd6c8f699ba7bb768efc9438378422e1cc6f9a56b489f53d0013c2c9d25
v1.15.6: 0addfb93b236afc51d9636d3008451ecc1e59662afcd02443ff1147ce3d1900e
v1.15.5: d16b3d28c2ce23e591aaa58ee4fa20fe7ab841f7a7b46a3d641a64e3fab272a4
v1.15.4: 480f2428a18875bce1deb24e6e3ef39a5f29f6cd7b8b02c93b6e6c3ee27e896e
v1.15.3: 100d8bddb29e77397b90e6dfbcf0af2d901a90ea4bde90b83b5a39f394c3900b
v1.15.2: eeaa8e071541c7bcaa186ff1d2919d076b27ef70c9e9df70f910756eba55dc99
v1.15.1: fc5af96fd9341776d84c38675be7b8045dee20af327af9331972c422a4109918
v1.15.0: d923c781031bfd97d0fbe50311e4d7c3616aa5b6d466b99049931f09d73d07b9
v1.14.5: 860b84dd32611a6008fe20fb998a2fc0a25ff44067eae556224827d05429c91e
v1.14.4: 429a10369b2ef35a9c2d662347277339d53fa66ef55ffeabcc7d9b850e31056d
v1.14.3: 3fac785261bcf79f7a80b12c4a1dda893ce8c0879caf57b36d4701730671b574
v1.14.2: 6929a59850c8702c04d62cd343d1143b17456da040f32317e09f8c25a08d2346
v1.14.1: 839a4abfeafbd5f5ab057ad0e8a0b0b488b3cde14a646eba040a7f579875f565
v1.14.0: d090b1da23564a7e9bb8f1f4264f2116536c52611ae203fe2ca13eaad0a8003e
arm64:
v1.15.11: 0b50954efe4a0b5c86a0e4db83e93880f6e12c04907d715d751ce0c545c8d5f4
v1.15.10: cb47d4ff05a456c6a06c51908e676d0deeeb05f1feaeb3c29049b11235e85dc0
v1.15.9: c9acae4d1a107a893b5a61727f4bf774cc35c58886a002888811d631d1c59b47
v1.15.8: f875b6438c1662ddba7a22503eeddf3392c198ec2e58858da236d7af53e4ceef
v1.15.7: 63dfe4330d42d04fd5b3e18d10afd360df1fa450be79f970f29f482b6ce170e6
v1.15.6: e117eabe736c2518854a308f86ac16d9d59a8e9a6299da0c93d6a26fc97919bd
v1.15.5: 04d5f6b859083373e0be95b5040b514e141506d060b5ae41c1d606e6c81d682f
v1.15.4: febe35c7390119b08073b6b348e5934ba308e4b3197638b94d16bcc0b37dedb4
v1.15.3: 1e3e70b8d1e8ebc642f2801d9c7938a27764dfb2f5aea432ab4326d43c04a1f5
v1.15.2: c4cf69f52c7013faee9d54e0f376e0732a4a7b0f7ffc7241e9b7e28bad0ac77f
v1.15.1: 80ed372c5f6c5178df88616175310057c06bdc9d0905953814a1927eb3aaa657
v1.15.0: 824af7d925b87a5ade63575b98b59ee81005fc76eac1dc399602308d7a60bc3c
v1.14.6: 97646bffe61e54a0c6f61d68b5625ec2e98d8b9d04cec2c8382266e437835e93
v1.14.5: 90c77847d64eb857c8e686e8593fe7a9e505bcbf960b0407217255827a9da59a
v1.14.4: 9e0b4fde88a07c705e0937cd3161392684e3ca08535d14a99ae3b86bbf4c56b3
v1.14.3: f29211d668cbcf1aa415dfa64aad95ffc53b5410482a23cddb680caec4e907a3
v1.14.2: 959fb7d9c17fc8f7cb1a69920aaf08aefd62c0fbf6b5bdc46250f147ea6a5cd4
v1.14.1: d5236efc2547fd07c7cc2ed9345dfbcd1204385847ca686cf1c62d15056de399
v1.14.0: 708e00a41f6516d525dee00c91ebe3c3bf2feaf9b7f0af7689487e3e17e356c2
v1.13.5: 8ffd84ba0cb6382a0ff96000458db8a83c92cac09458defe8496f0f0e155a6a8
v1.13.4: b9e909e388634d103fe5376aafa313bed5e69293383b0c740de4fe8e18d42d12
v1.13.3: 588037923b7f4090f5f7a3de23ea49a10345295f0b39bd0c1ebdaa24eaa76731
v1.13.2: 7f2c2b0c6dcc81102a89fa41957db214416fc8a0cfae664fc0e150a7d3ad337b
v1.13.1: 66205d99ec93090c6d814ab1de7c38cd84257d3dcf3a957618fad5878caea13d
v1.13.0: 4391ea0d8d472c1737f1ce945756bf2a11395c708824c780d1a44fbddf031e59
v1.12.6: 29036599f173ceeab2c72dff589aa02d5a55b45143c70de7c08cdea75a282bc2
v1.12.5: 0b953f2d564d2f9298f3fc3ad6279cf4b18c1c967ebd2d542b79dda54e3aa27a
v1.12.4: f66fc2c945c757d6b34cdd654c3d951f74f366026f9af0dd10eb64e482584811
v1.12.3: a6142356fdbc8915cb474437355f809d987f6b983e21081dc3e18ea86c76bf85
v1.12.2: 81e6fdd4e3ed8687a37d9b1830aa43d508fb7d5061c81cb2576a17077382b614
v1.12.1: 6863440b5516c94f48a3a23bf325a007af09f5412f335444e204bc4b09fbad2a
v1.12.0: 3eb06e2344ea5e4988fdda168627319e7b10665f7f9fc9d96c477ccb39d0b061
amd64:
v1.15.11: 25b93bbf65d1cafc2dff34dd547483401e236b840cc73ab8f12269cda98a4f98
v1.15.10: 22789e14800fc71498d6f1a147dbc38e9c3c31f652963cf7cd45c40c23b77aa4
v1.15.9: 5f666ef43073d8d38b96d9e6615023b9c8876c2bc39efb709167faeefdb3446c
v1.15.8: 6b5b3cfb24d0d1554483ca7e575818f461c30d2649c4fa6db4181061fc22affc
v1.15.7: 001eb004940563200a4a6fb1163ba61f56adfcef98780113daa045f67c47dace
v1.15.6: 5864218351364e4c47889029a2dcb07e4737fdb71656f4855f7cdc6541161dee
v1.15.5: 15988bea1177a3e169aa43177456b6f58a15318165b15144c47ccd044de9edfe
v1.15.4: 7b8d6a151cde98a22c4fbc9d93e2fe29a13fc1a3d97d84904682488fc34453f1
v1.15.3: 3685c65b4fb85d552f77346900affc2e9a1bc997b4cd3dde0e705fd8c1d9be7a
v1.15.2: ab885606438748eb89a7738e219f5353d94c40c63a4935a539ce89760280f065
v1.15.1: 22b7b1e7f5f2a452d62e0ca4c2cba67119c51e04219aaeaf8452825f9177069e
v1.15.0: 3cc72cc58517b97c608c7a59a20255675bc70f07217c9e11e58cac7746139283
v1.14.6: 4f9a8984985786797fa3353961ba2b58f50235581c9b5978130fbb4199005538
v1.14.5: 2c3410518980b8705ba9b7b708076a206f2bde37cb8bf5ba8f15c32c697f4d97
v1.14.4: 5f31434f3a884257a7b0e3178fc869720a7526c8637af5713d23433ddf2592dd
v1.14.3: 6c6cb5c118b2129ba4e56697f42567be3587eb636a477cd342b69f87b3b049d1
v1.14.2: 05546057f2053e085fa8387ab82581c95fe4195cd783408ccbb4fc3487c50176
v1.14.1: fb34b98da9325feca8daa09bb934dbe6a533aad69c2a5599bbed81b99bb9c267
v1.14.0: af8b04504365dbe4ce6a1772f42eb390d4221a21149b522fc8a0c4b1cd3d97aa
v1.13.5: 1a8a357ebfeab8ec62d0c6f11b59df1a93d6711c3a16e1501da32b55c144c73a
v1.13.4: 6f2d755a350efec8b3b29e0ddf8362f60475cc10d42dea37f8f2159f7776867b
v1.13.3: b238c772b5e4b9deed0cdc695fe86324660d037b38c6d6d7eeae7d7a657840c7
v1.13.2: f159b587ec80ad03bf3b9bb09de5d64b773d01b0e34f2a4f1c816879c56aae6d
v1.13.1: f64c4328d3853f3e5680e7d296b0f3ed25e67ff98321867309edea100ebb4fd7
v1.13.0: 754f1baae5dc2ba29afc66e1f5d3b676ee59cd5c40ccce813092408d53bde3d9
v1.12.6: eb7bd0c21977bca7071c65fa0ef60d5e09c9e9a16c4fd8435be5bd7f5b0d1221
v1.12.5: f8b651816b2caa33e8b25a666e5c370e9786356d59f89579bba772f28370ed00
v1.12.4: a4697d8f3791f0408fcdb97b3de187e47d7b39a63332c75f68f95e25f4891cc9
v1.12.3: 600aad3f0d016716abd85931239806193ffbe95f2edfdcea11532d518ae5cdb1
v1.12.2: 566dfed398c20c9944f8999d6370cb584cb8c228b3c5881137b6b3d9306e4b06
v1.12.1: 4aa23cfb2fc2e2e4d0cbe0d83a648c38e4baabd6c66f5cdbbb40cbc7582fdc74
v1.12.0: f80336201f3152a5307c01f8a7206847398dde15c69b3d20c76a7d9520b60daf
kubeadm_checksums:
arm:
v1.15.11: 47eb7b7e14cd58531bb39bbee618259b3d0a14e4d9cd9135ce4670bd8295e644
v1.15.10: 2515eb19b8cbd50dc639ec15d90835f3720cb64f9550e140b8f408ede9373f55
v1.15.9: 90047aa32b071f05892764c5bcfd28dcc6e0de51ea7af8c41269cde9eb15dfe2
v1.15.8: 612dbcdbcd2ba6ec2f54b431bee3d58589e6b50dcd707528bc83e89bf74128aa
v1.15.7: 313a5cde31a2c892dbde82f3c8ec3675787b8f26f27f14533d38bdf326f6c872
v1.15.6: 0c6b9e3c91476b75e716c6789783c4bd0d480e94690b5e556b7d96b61fcf227e
v1.15.5: 4a4ed964b61bbe99c4293c5ef0168cc8c2601d285e525d177b8b0d478960a8ab
v1.15.4: 69984698052f1563fd44d78e1a68e140a552df7ed57ccd9c83bdfd82bc6103dd
v1.15.3: 6c6fa56810908b5be83882094ea199844edc94b7e969160623c86512d9251c06
v1.15.2: 4b35ad0031c08a83de7c8d9f9bbed6a30d93a5c74e16ea9e6211ad2e0e12bdd1
v1.15.1: 855abd520291dcef0577a1a2ef87a70f522fd2b22603a12abcd86c2f7ec9c022
v1.15.0: 9464030a1d4e101de5f47348f3514d5a9eb95cbce2e5e31f53ada1ca485cf75e
v1.14.5: 0bb551f7468de2fa6f98ce60653495327be052364ac9f9e8917a4d1ad864412b
v1.14.4: 36835488d7187406690ee6aa4b3c9c54855cb5c55d786d0574a508b955fe3a46
v1.14.3: 270b8c346aeaa309d11d65695c4a90f6bff5b1ea14bdec3c417ca2dfb3de0db3
v1.14.2: d2a59269aa68a4bace2a80b247b6f9a82f0542ec3004185fb0ba86e181fdfb29
v1.14.1: 4bd111411208f1270ed3af8780b87d24a3c17c9fdbe4b0f8c7a9a21cd765543e
v1.14.0: 11f2cfa8bf7ee177dbac8073ab0f039dc265536baaa8dc0c4dea699f981f6fd1
arm64:
v1.15.11: e947c5472e5167503a502e5825f8d11aa40d15bb8f2181d43331807bcd2a7731
v1.15.10: b318749f2865c403ce64f48ccb1d189597e19b726f9e866ae34108de3cc2916f
v1.15.9: 99f52bfb87a5e4720768d6249e4d450604690a77ad5afa6e4f246cf0d914b62d
v1.15.8: 1af2abc47e15aeafca6f8b10eaaca59746d7c4645d63fedd007ba0b455e3528a
v1.15.7: 39488a6b7d887d1ff4fe4801724e512ee547752c5337e3e50b8e32eade1e376a
v1.15.6: 79aea11d6aaf7792135cabecd7446c9725966be7daf24a441ba89d9dec918d00
v1.15.5: 26e0587398cf4b5bf4456aa65c507aa3713498025a43e3ae1654f54295f27464
v1.15.4: df6747066627f8d803033c20e1161c0cd68f3e8ffd72a972f1cfc4221c67c6e9
v1.15.3: 6f472bc8ab1ba3d76448bd45b200edef96741e5affde8dc1429300af3a4904d8
v1.15.2: d3b6ee2048b366726ca366d2db4c46b2cacc38e8ec09cc35781d16593753d930
v1.15.1: 44fbfad0f1026d249fc4f365f1e9562cd52d75360d4d1032731122ba5a4d57dc
v1.15.0: fe3c79070814fe847a23209b1027672fe5c5e7e5c9611e329225058926836f96
v1.14.6: d935de033e7442ce5f8a35294fa890b884454d0482a9cf136c4abacd8c6ec165
v1.14.5: 7dd1195d16980c4c888d13e49d97c3513f668e192bf2778bc0f0516e0f7fe2ac
v1.14.4: 60745b3ac761d3aa55ab9a24677ecf4e7f48b5abed34c725047a174456e5a79b
v1.14.3: 8edcc07c65f81eea3fc47cd237dd6560c6907c5e0ca52d71eab53ca1164e7d01
v1.14.2: bff0712b87796509129aa802ad3ac25b8cc83af01762b22b4dcca8dbdb26b520
v1.14.1: 5cf05464168e45ee4719264a267c65f9319fae1ceb9923fedab97a9d6a629e0b
v1.14.0: 7ed9d706e50cd6d3fc618a7af3d19b691b8a5343ddedaeccb4ea09af3ecfae2c
v1.13.5: 59a1995c171e5c1e74f5d02657eb2c155706f2d159ec1847b64dc866228c40d2
v1.13.4: 4de71d4cfa4dc64127148d48f3a1a1fa7ea24cf0c4fa42957459d0e7f9c03799
v1.13.3: bef1cbc2d199d32a1a31e70b864dc539b24e3c1cb87b50a1295cf03bec4832b0
v1.13.2: 08279a3bfeff8c4f6768d6fd92ceff8276a555f9e81bf9d541112fc8eb29963e
v1.13.1: 0f5c2c8a1ffe235785c0a38c9a6530d3d9e67b00e9a07c9d5dca4c36ede2e078
v1.13.0: efc2669952b05161e181f0805bb0647308891259528a4868e69f4b1b68c70489
v1.12.6: 2552b6b623c0c390d495e1fcfbecbebb2ca8853bce4011ce9b9dc3f1763a9b2b
v1.12.5: f8b212c4a63d28e800a312c3785a62650c3c5bb26326e414d59e1a548d68fbae
v1.12.4: 929ec24bee4d7645a18b157d6987554a131fde8d1efb704391bdfe81e6dcf1d4
v1.12.3: e9e54a553447391de59a21d3da5b58d61d3431877de194434b4ae6544594009e
v1.12.2: 2230dfabd76a4d0888facccb3ec3c802b658e835aaa817cbce2310d3f8533fc4
v1.12.1: 226b9026ef913e98c2966503fde6973e3e33b5621e9c240667093dcb786bd811
v1.12.0: c0d4a75615791e6880d051d6d601eb703e0ac3ec64f94f156b76351368b2eb9c
amd64:
v1.15.11: 2ebc93a6f64419e5e7f26a4674c6784463c7b2a51c0afe8ec2c0423471e7cec3
v1.15.10: f4a6fb64125d3f517976a68db2ba0f76a85467681b6e1b50b95ea3397ec7e520
v1.15.9: 366a7f260cbd1aaa2661b1e3b83a7fc8781c8a8b07c71944bdaf66d49ff5abae
v1.15.8: 9c5a176ea2f4dbf383557211873ec95fe4ffdb5d54d4311f00b92ec592d2bae7
v1.15.7: d64d6e4a711d293758476ec3183091cbfeb1ed0a19d92eda8ff3350017ed6ba0
v1.15.6: e1699c7afa090453241a009d9878fdd405a48f052e93e2ff056a8f2cf3a1cae7
v1.15.5: e64bb0b2cfdcaa1f4063879bb358848c41aa1b5cc18b75c91994d11a9bf8c136
v1.15.4: 3acf748ec5d69f316da85fb1e75945afb028f1e207ecb0b5986e23932c040194
v1.15.3: ec56a00bc8d9ec4ac2b081a3b2127d8593daf3b2c86560cf9e6cba5ada2d5a80
v1.15.2: fe2a13a1dea73249560ea44ab54c0359a9722e9c66832f6bcad86798438cba2f
v1.15.1: 3d42441ae177826f1181e559cd2a729464ca8efadef196cfa0e8053a615333b5
v1.15.0: fc4aa44b96dc143d7c3062124e25fed671cab884ebb8b2446edd10abb45e88c2
v1.14.6: 4ef6030ab059ed434702c003975273dc855c370c4fcdae1109a3bb137c16ecb9
v1.14.5: b3e840f7816f64e071d25f8a90b984eecd6251b68e568b420d85ef0a4dd514bb
v1.14.4: 291790a1cef82c4de28cc3338a199ca8356838ca26f775f2c2acba165b633d9f
v1.14.3: 026700dfff3c78be1295417e96d882136e5e1f095eb843e6575e57ef9930b5d3
v1.14.2: 77510f61352bb6e537e70730b670627963f2c314fbd36a644b0c435b97e9705a
v1.14.1: c4fc478572b5623857f5d820e1c107ae02049ca02cf2993e512a091a0196957b
v1.14.0: 03678f49ee4737f8b8c4f59ace0d140a36ffbc4f6035c59561f59f45b57d0c93
v1.13.5: 274bf887039a9993e30f96047a4a474c39e8471c4094acb75aea6beed793f079
v1.13.4: c4300d1f3ebccad48c8e267e45a736c7d227b0e45ef36582fa8dcfe2ef7b1b10
v1.13.3: ab767ea53e45aceba628977ef6c8c62eace72d6d232efeaf35ac50cbea5f3739
v1.13.2: 7cb0ce57c1e6e2d85e05de3780a2f35a191fe93f89cfc5816b424efcf39834b9
v1.13.1: 438173bfa0b7014ecae994c5b9e1f27e1328ab971a3fdb06a393a8095a176ba0
v1.13.0: f5366206416dc4cfc840a7add2289957b56ccc479cc1b74f7397a4df995d6b06
v1.12.6: 9048031930be9cb0506940c04f6ce67408d9caa9384b32d65d7aa5b6f1ad58ec
v1.12.5: d61730b3deb4d9825af0cc1e452a4be2292400507128279770c39669f6599af9
v1.12.4: 674ad5892ff2403f492c9042c3cea3fa0bfa3acf95bc7d1777c3645f0ddf64d7
v1.12.3: c675aa3be82754b3f8dfdde2a1526a72986713312d46d898e65cb564c6aa8ad4
v1.12.2: 51bc4bfd1d934a27245111c0ad1f793d5147ed15389415a1509502f23fcfa642
v1.12.1: 5d95efd65aad398d85a9802799f36410ae7a95f9cbe73c8b10d2213c10a6d7be
v1.12.0: 463fb058b7fa2591fb01f29f2451b054f6cbaa0f8a20394b4a4eb5d68473176f
etcd_binary_checksums:
# Etcd does not have arm32 builds at the moment; a dummy value is
# required to avoid a "no attribute" error
arm: 0
arm64: 5ec97b0b872adce275b8130d19db314f7f2b803aeb24c4aae17a19e2d66853c4
amd64: 1620a59150ec0a0124a65540e23891243feb2d9a628092fb1edcc23974724a45
arm64: c219b254ece7d7e308ae41569fa240dbae2de460bed818ee39b408b73f6360ef
amd64: 127d4f2097c09d929beb9d3784590cc11102f4b4d4d4da7ad82d5c9e856afd38
cni_binary_checksums:
arm: ae6ddbd87c05a79aceb92e1c8c32d11e302f6fc55045f87f6a3ea7e0268b2fda
arm64: acde854e3def3c776c532ae521c19d8784534918cc56449ff16945a2909bff6d
amd64: e9bfc78acd3ae71be77eb8f3e890cc9078a33cc3797703b8ff2fc3077a232252
arm64: 016bbc989877e35e3cd49fafe11415fb2717e52c74fde6b1650411154cb91b81
amd64: f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d
calicoctl_binary_checksums:
arm:
v3.6.1: 0
v3.5.4: 0
v3.4.4: 0
v3.7.3: 0
amd64:
v3.6.1: 3b01336de37550e020343d62a38c96c4605d33a3ed7ddba2fe38bc172a5b42b5
v3.5.4: 197194b838cc2a9a7455c2ebd5505a5e24f8f3d994eb75c17f5dd568944100b8
v3.4.4: 93bd084e053cf1bf3b7fef369677bd6767c30fe7135e2c7e044e31693422ef61
v3.7.3: 932f68e893e80e95e10f064f1e7745e438d456f41a6ff12d11bb16ca0cab735c
arm64:
v3.6.1: 60fbaeb257061647bdf12b5ede7a0d4298a5ee216f6472e5a92bb14ef5c2a5d3
v3.5.4: a4481178665658658a73e4ceca9a1dff5cccded4179615c91d1c3e49fd96f237
v3.4.4: ff35d9e8b5c00e9fe47d05e8f5123ec98fd641370f8cd93f4fbb3d913da77ab6
v3.7.3: 7cfaab25c287f7ef93b2682d060b55bf39f76b668540de50376b5ed174209832
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch] }}"
hyperkube_binary_checksum: "{{ hyperkube_checksums[image_arch][kube_version] }}"
kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}"
calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}"
crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}"
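Most of the *_binary_checksum variables above are nested lookups keyed first by image_arch and then by the requested version (etcd and cni are keyed by architecture only), so bumping a version without recording its checksum makes the lookup fail. A minimal guard sketch; the assert task is an illustration, not something this diff adds:

- name: Ensure checksums exist for the requested versions (illustrative only)
  assert:
    that:
      - kube_version in hyperkube_checksums[image_arch]
      - kubeadm_version in kubeadm_checksums[image_arch]
    fail_msg: "no checksum recorded for this architecture/version pair"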
# Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,
@@ -303,18 +229,8 @@ contiv_ovs_image_repo: "docker.io/contiv/ovs"
contiv_ovs_image_tag: "latest"
cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
cilium_init_image_repo: "docker.io/cilium/cilium-init"
cilium_init_image_tag: "2019-04-05"
cilium_operator_image_repo: "docker.io/cilium/operator"
cilium_operator_image_tag: "{{ cilium_version }}"
kube_ovn_db_image_repo: "docker.io/kubeovn/kube-ovn-db"
kube_ovn_node_image_repo: "docker.io/kubeovn/kube-ovn-node"
kube_ovn_cni_image_repo: "docker.io/kubeovn/kube-ovn-cni"
kube_ovn_controller_image_repo: "kubeovn/kube-ovn-controller"
kube_ovn_db_image_tag: "{{ kube_ovn_version }}"
kube_ovn_node_image_tag: "{{ kube_ovn_version }}"
kube_ovn_controller_image_tag: "{{ kube_ovn_version }}"
kube_ovn_cni_image_tag: "{{ kube_ovn_version }}"
cilium_init_image_repo: "docker.io/library/busybox"
cilium_init_image_tag: "1.28.4"
kube_router_image_repo: "docker.io/cloudnativelabs/kube-router"
kube_router_image_tag: "{{ kube_router_version }}"
multus_image_repo: "docker.io/nfvpe/multus"
@@ -325,22 +241,22 @@ nginx_image_tag: 1.15
haproxy_image_repo: docker.io/haproxy
haproxy_image_tag: 1.9
coredns_version: "1.6.0"
coredns_version: "1.5.0"
coredns_image_repo: "docker.io/coredns/coredns"
coredns_image_tag: "{{ coredns_version }}"
nodelocaldns_version: "1.15.5"
nodelocaldns_image_repo: "{{ kube_image_repo }}/k8s-dns-node-cache"
nodelocaldns_version: "1.15.1"
nodelocaldns_image_repo: "k8s.gcr.io/k8s-dns-node-cache"
nodelocaldns_image_tag: "{{ nodelocaldns_version }}"
dnsautoscaler_version: 1.6.0
dnsautoscaler_version: 1.4.0
dnsautoscaler_image_repo: "k8s.gcr.io/cluster-proportional-autoscaler-{{ image_arch }}"
dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}"
test_image_repo: docker.io/busybox
test_image_tag: latest
busybox_image_repo: docker.io/busybox
busybox_image_tag: 1.29.2
helm_version: "v2.14.3"
helm_version: "v2.13.1"
helm_image_repo: "docker.io/lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
@@ -350,11 +266,11 @@ registry_image_repo: "docker.io/registry"
registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
metrics_server_version: "v0.3.3"
metrics_server_version: "v0.3.2"
metrics_server_image_repo: "gcr.io/google_containers/metrics-server-amd64"
metrics_server_image_tag: "{{ metrics_server_version }}"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.3.2"
local_volume_provisioner_image_tag: "v2.1.0"
cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
cephfs_provisioner_image_tag: "v2.1.0-k8s1.11"
rbd_provisioner_image_repo: "quay.io/external_storage/rbd-provisioner"
@@ -362,7 +278,7 @@ rbd_provisioner_image_tag: "v2.1.1-k8s1.11"
local_path_provisioner_image_repo: "docker.io/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "v0.0.2"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.25.1"
ingress_nginx_controller_image_tag: "0.21.0"
cert_manager_version: "v0.5.2"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
@@ -373,9 +289,6 @@ addon_resizer_image_tag: "{{ addon_resizer_version }}"
dashboard_image_repo: "gcr.io/google_containers/kubernetes-dashboard-{{ image_arch }}"
dashboard_image_tag: "v1.10.1"
image_pull_command: "{{ docker_bin_dir }}/docker pull"
image_info_command: "{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f \"{{ '{{' }} if .RepoTags {{ '}}' }}{{ '{{' }} (index .RepoTags 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}{{ '{{' }} if .RepoDigests {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}\" | tr '\n' ','"
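Every {{ '{{' }} / {{ '}}' }} pair in image_info_command above is a Jinja escape that emits a literal brace pair for Docker's Go template. Assuming docker_bin_dir is /usr/bin, the variable renders to roughly:

  /usr/bin/docker images -q | xargs /usr/bin/docker inspect -f "{{ if .RepoTags }}{{ (index .RepoTags 0) }}{{ end }}{{ if .RepoDigests }},{{ (index .RepoDigests 0) }}{{ end }}" | tr '\n' ','

which lists the first tag and, when present, the first digest of every local image as a single comma-separated string.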
downloads:
netcheck_server:
enabled: "{{ deploy_netchecker }}"
@@ -397,15 +310,13 @@ downloads:
etcd:
container: "{{ etcd_deployment_type != 'host' }}"
file: "{{ etcd_deployment_type == 'host' or etcd_kubeadm_enabled }}"
file: "{{ etcd_deployment_type == 'host' }}"
enabled: true
version: "{{ etcd_version }}"
dest: "{{local_release_dir}}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
repo: "{{ etcd_image_repo }}"
tag: "{{ etcd_image_tag }}"
sha256: >-
{{ etcd_binary_checksum if (etcd_deployment_type == 'host' or etcd_kubeadm_enabled)
else etcd_digest_checksum|d(None) }}
sha256: "{{ etcd_binary_checksum if etcd_deployment_type == 'host' else etcd_digest_checksum|d(None) }}"
url: "{{ etcd_download_url }}"
unarchive: true
owner: "root"
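For file-type downloads such as the host-deployed etcd entry above, the sha256 field carries the matching value from the checksum tables so the archive can be verified before unpacking. A minimal sketch of that fetch-and-verify step, under the assumption that an entry is consumed via get_url (kubespray's actual download tasks live elsewhere and are not shown in this diff):

- name: Fetch and verify a download entry (illustrative only)
  get_url:
    url: "{{ item.url }}"
    dest: "{{ item.dest }}"
    checksum: "sha256:{{ item.sha256 }}"
    mode: "{{ item.mode | default('0755') }}"
  when: item.file and item.enabled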
@@ -417,7 +328,7 @@ downloads:
enabled: true
file: true
version: "{{ cni_version }}"
dest: "{{local_release_dir}}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
dest: "{{local_release_dir}}/cni-plugins-{{ image_arch }}-{{ cni_version }}.tgz"
sha256: "{{ cni_binary_checksum }}"
url: "{{ cni_download_url }}"
unarchive: false
@@ -430,7 +341,7 @@ downloads:
enabled: true
file: true
version: "{{ kubeadm_version }}"
dest: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}"
dest: "{{local_release_dir}}/kubeadm"
sha256: "{{ kubeadm_binary_checksum }}"
url: "{{ kubeadm_download_url }}"
unarchive: false
@@ -443,7 +354,7 @@ downloads:
enabled: true
file: true
version: "{{ kube_version }}"
dest: "{{ local_release_dir }}/hyperkube-{{ kube_version }}-{{ image_arch }}"
dest: "{{ local_release_dir }}/hyperkube"
sha256: "{{ hyperkube_binary_checksum }}"
url: "{{ hyperkube_download_url }}"
unarchive: false
@@ -452,19 +363,6 @@ downloads:
groups:
- k8s-cluster
crictl:
file: true
enabled: "{{ container_manager in ['crio', 'cri', 'containerd'] }}"
version: "{{ crictl_version }}"
dest: "{{local_release_dir}}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz"
sha256: "{{ crictl_binary_checksum }}"
url: "{{ crictl_download_url }}"
unarchive: true
owner: "root"
mode: "0755"
groups:
- k8s-cluster
cilium:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
@@ -483,15 +381,6 @@ downloads:
groups:
- k8s-cluster
cilium_operator:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_operator_image_repo }}"
tag: "{{ cilium_operator_image_tag }}"
sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
groups:
- k8s-cluster
multus:
enabled: "{{ kube_network_plugin_multus }}"
container: true
@@ -622,42 +511,6 @@ downloads:
groups:
- k8s-cluster
kube_ovn_db:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_db_image_repo }}"
tag: "{{ kube_ovn_db_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_node:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_node_image_repo }}"
tag: "{{ kube_ovn_node_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_controller:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_controller_image_repo }}"
tag: "{{ kube_ovn_controller_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_cni:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_cni_image_repo }}"
tag: "{{ kube_ovn_cni_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_router:
enabled: "{{ kube_network_plugin == 'kube-router' }}"
container: true

Some files were not shown because too many files have changed in this diff.