Compare commits


7 Commits

Author SHA1 Message Date
Andreas Holmsten
7d8da8348e Cherry pick #4857 #4859 #4867 into release-2.10 (#4877)
* Fix starting CoreDNS when initializing a secondary master (#4867)

* Update dns-autoscaler.yml.j2 (#4857)

Merge the two tolerations, because the latest toleration would otherwise override the first.

* Remove GCE tests as CNCF funding ended (#4859)
2019-06-13 05:22:17 -07:00
Andreas Holmsten
b90b1fc2b9 updated pinning to prevent breaking changes (#4783) (#4873)
* updated Ansible pinning to prevent further breaking changes

* more exact pinning of ansible version

* more exact pinning of the Ansible version and of all other dependencies

* added testing requirements.txt pinning settings

* removed boto from testing requirements.txt
2019-06-13 02:36:19 -07:00
Andreas Holmsten
147ea54374 Cherry pick #4861 into release-2.10 (#4874)
* Rebase only on PRs (#4861)

* Rebase from release-2.10 branch instead of master
2019-06-12 23:06:13 -07:00
Bort Verwilst
d53782a7f1 k8s 1.14.3 (#4855) 2019-06-09 03:41:05 -07:00
Bort Verwilst
e2f5a9748e upgrade to 1.14.2 (#4782)
* upgrade to 1.14.2

* Remove trailing whitespace
2019-05-20 06:01:15 -07:00
Andreas Krüger
0d1a34ee6b Merge pull request #4718 from lystor/bug-4695
Fix adding output of kubeadm to the admin.conf downloaded to the arti…
2019-05-08 14:52:19 +02:00
lystor
28ad0e676d Fix adding output of kubeadm to the admin.conf downloaded to the artifacts directory (#4696)
Fixes issue https://github.com/kubernetes-sigs/kubespray/issues/4695
2019-05-07 12:55:54 +03:00
404 changed files with 3228 additions and 6512 deletions

View File

@@ -3,23 +3,14 @@ parseable: true
skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# The following rules throw errors.
# These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
# These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
- '204'
- '206'
- '301'
- '305'
- '306'
- '404'
- '502'
- '503'
# These rules are intentionally skipped:
#
# [E204]: "Lines should be no longer than 160 chars"
# This could be re-enabled with a major rewrite in the future.
# For now, there's not enough value gain from strictly limiting line length.
# (Disabled in May 2019)
- '204'
# [E701]: "meta/main.yml should contain relevant info"
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
# While it can be useful to have these metadata available, they are also available in the existing documentation.
# (Disabled in May 2019)
- '504'
- '701'

View File

@@ -1,8 +1,8 @@
---
stages:
- unit-tests
- deploy-part1
- moderator
- deploy-part1
- deploy-part2
- deploy-gce
- deploy-special
@@ -37,7 +37,7 @@ before_script:
tags:
- packet
variables:
KUBESPRAY_VERSION: v2.10.0
KUBESPRAY_VERSION: v2.9.0
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
.testcases: &testcases
@@ -60,8 +60,6 @@ ci-authorized:
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
# Disable ci moderator
only: []
include:
- .gitlab-ci/lint.yml

View File

@@ -20,8 +20,6 @@
<<: *gce_variables
tags:
- gce
except: ['triggers']
only: [/^pr-.*$/]
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
@@ -38,6 +36,8 @@ gce_ubuntu18-flannel-aio:
stage: deploy-part1
<<: *gce
when: manual
except: ['triggers']
only: [/^pr-.*$/]
### PR JOBS PART2
@@ -45,11 +45,15 @@ gce_coreos-calico-aio:
stage: deploy-gce
<<: *gce
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
gce_centos7-flannel-addons:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: [/^pr-.*$/]
### MANUAL JOBS
@@ -60,42 +64,36 @@ gce_centos-weave-kubeadm-sep:
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
except: []
gce_ubuntu-weave-sep:
stage: deploy-gce
<<: *gce
when: manual
only: ['triggers']
except: []
gce_coreos-calico-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-canal-ha-triggers:
stage: deploy-special
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-flannel-addons-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-weave-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
# More builds for PRs/merges (manual) and triggers (auto)
@@ -104,23 +102,27 @@ gce_ubuntu-canal-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-canal-kubeadm-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_ubuntu-flannel-ha:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
gce_centos-weave-kubeadm-triggers:
stage: deploy-gce
@@ -129,87 +131,99 @@ gce_centos-weave-kubeadm-triggers:
<<: *centos_weave_kubeadm_variables
when: on_success
only: ['triggers']
except: []
gce_ubuntu-contiv-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-cilium:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu18-cilium-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-weave-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_debian9-calico-upgrade:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_debian9-calico-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_coreos-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-canal-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_rhel7-canal-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_rhel7-canal-sep-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-calico-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-calico-ha-triggers:
stage: deploy-gce
<<: *gce
when: on_success
only: ['triggers']
except: []
gce_centos7-kube-router:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_centos7-multus-calico:
stage: deploy-gce
@@ -217,11 +231,6 @@ gce_centos7-multus-calico:
variables:
<<: *centos7_multus_calico_variables
when: manual
gce_oracle-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
@@ -229,19 +238,27 @@ gce_opensuse-canal:
stage: deploy-gce
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_coreos-kube-router:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
gce_ubuntu-kube-router-sep:
stage: deploy-special
<<: *gce
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

View File

@@ -6,15 +6,6 @@ yamllint:
- yamllint --strict .
except: ['triggers', 'master']
vagrant-validate:
extends: .job
stage: unit-tests
script:
- curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
- dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
- vagrant validate --ignore-provider
except: ['triggers', 'master']
ansible-lint:
extends: .job
stage: unit-tests

View File

@@ -9,8 +9,6 @@
<<: *packet_variables
tags:
- packet
only: [/^pr-.*$/]
except: ['triggers']
.test-upgrade: &test-upgrade
variables:
@@ -20,6 +18,8 @@ packet_ubuntu18-calico-aio:
stage: deploy-part1
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
# ### PR JOBS PART2
@@ -27,6 +27,8 @@ packet_centos7-flannel-addons:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
# ### MANUAL JOBS
@@ -35,14 +37,12 @@ packet_centos-weave-kubeadm-sep:
<<: *packet
when: on_success
only: ['triggers']
except: []
packet_ubuntu-weave-sep:
stage: deploy-part2
<<: *packet
when: manual
only: ['triggers']
except: []
# # More builds for PRs/merges (manual) and triggers (auto)
@@ -50,73 +50,74 @@ packet_ubuntu-canal-ha:
stage: deploy-special
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-canal-kubeadm:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-flannel-ha:
stage: deploy-part2
<<: *packet
when: manual
when: on_success
except: ['triggers']
packet_ubuntu-contiv-sep:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: on_success
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu18-cilium-sep:
stage: deploy-special
<<: *packet
when: manual
packet_ubuntu18-flannel-containerd:
stage: deploy-part2
<<: *packet
when: manual
packet_debian9-macvlan-sep:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_debian9-calico-upgrade:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-calico-ha:
stage: deploy-part2
<<: *packet
when: manual
packet_centos7-kube-ovn:
stage: deploy-part2
<<: *packet
when: on_success
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-kube-router:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: on_success
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_centos7-multus-calico:
stage: deploy-part2
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_opensuse-canal:
stage: deploy-part2
<<: *packet
when: manual
packet_oracle-7-canal:
stage: deploy-part2
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
packet_ubuntu-kube-router-sep:
stage: deploy-part2
stage: deploy-special
<<: *packet
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

View File

@@ -4,51 +4,49 @@
extends: .job
before_script:
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Install Terraform
- apt-get install -y unzip
- curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
- unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
# Prepare inventory
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
- ln -s contrib/terraform/$PROVIDER/hosts
- terraform init contrib/terraform/$PROVIDER
- cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
- cd inventory/$CLUSTER
- ln -s ../../contrib/terraform/$PROVIDER/hosts
- terraform init ../../contrib/terraform/$PROVIDER
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
only: ['master', /^pr-.*$/]
.terraform_validate:
extends: .terraform_install
stage: unit-tests
only: ['master', /^pr-.*$/]
script:
- if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
- terraform fmt -check -diff contrib/terraform/$PROVIDER
- terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
- terraform fmt -check -diff ../../contrib/terraform/$PROVIDER
.terraform_apply:
extends: .terraform_install
stage: deploy-part2
when: manual
only: [/^pr-.*$/]
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
- tests/scripts/testcases_run.sh
- terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
- ansible-playbook -i hosts ../../cluster.yml --become
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
- cd inventory/$CLUSTER
- terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER
tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: 0.12.6
TF_VERSION: 0.11.11
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
@@ -72,6 +70,7 @@ tf-packet-ubuntu16-default:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_plan_k8s_masters: t1.small.x86
@@ -86,6 +85,7 @@ tf-packet-ubuntu18-default:
TF_VERSION: 0.11.11
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_plan_k8s_masters: t1.small.x86
@@ -105,16 +105,15 @@ tf-packet-ubuntu18-default:
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
tf-ovh_ubuntu18-calico:
tf-apply-ovh:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.6
TF_VERSION: 0.11.11
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -132,31 +131,3 @@ tf-ovh_ubuntu18-calico:
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_coreos-calico:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.6
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: core
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_image: "CoreOS Stable"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

View File

@@ -4,8 +4,8 @@ RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync
libssl-dev python-dev sshpass apt-transport-https jq \
ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
@@ -13,6 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
stable" \
&& apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl \
&& chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

View File

@@ -18,4 +18,3 @@ aliases:
- chapsuk
- mirwan
- miouge1
- holmsten

View File

@@ -36,9 +36,9 @@ To deploy the cluster you can use :
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# The option `-b` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# Without -b the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
@@ -101,7 +101,6 @@ Supported Linux Distributions
- **Fedora** 28
- **Fedora/CentOS** Atomic
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7
Note: Upstart/SysV init based OS types are not supported.
@@ -109,33 +108,32 @@ Supported Components
--------------------
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
- [etcd](https://github.com/coreos/etcd) v3.3.10
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.3
- [etcd](https://github.com/coreos/etcd) v3.2.26
- [docker](https://www.docker.com/) v18.06 (see note)
- [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
- [calico](https://github.com/projectcalico/calico) v3.7.3
- [calico](https://github.com/projectcalico/calico) v3.4.0
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5
- [cilium](https://github.com/cilium/cilium) v1.3.0
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
- [multus](https://github.com/intel/multus-cni) v3.2.1
- [weave](https://github.com/weaveworks/weave) v2.5.2
- [multus](https://github.com/intel/multus-cni) v3.1.autoconf
- [weave](https://github.com/weaveworks/weave) v2.5.1
- Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
- [coredns](https://github.com/coredns/coredns) v1.6.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1
- [coredns](https://github.com/coredns/coredns) v1.5.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
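The note above names the tooling but not the mechanics; as a minimal, hypothetical sketch (package name and play context are assumptions, not part of this changeset), an Ansible task that holds the Docker package on Debian-family hosts so unattended upgrades cannot move past the validated release could look like:
```
# Hypothetical sketch: pin docker-ce via dpkg selections; RedHat-family
# hosts would use the yum versionlock plugin instead.
- name: Hold docker-ce at the validated version
  dpkg_selections:
    name: docker-ce
    selection: hold
  when: ansible_os_family == "Debian"
```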
Requirements
------------
- **Minimum required version of Kubernetes is v1.14**
- **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
- **Ansible v2.7.8 (or newer) and python-netaddr is installed on the machine
that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -158,7 +156,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
Network Plugins
---------------
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
@@ -172,17 +170,13 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
iptables for network policies, and BGP for pods L3 networking (with optional BGP peering with out-of-cluster BGP peers).
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
The choice is defined with the variable `kube_network_plugin`. There is also an
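As noted above, the plugin is selected with the `kube_network_plugin` variable; a minimal sketch of that override, assuming the standard inventory layout (the file path is illustrative):
```
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml (assumed path)
kube_network_plugin: calico   # e.g. flannel, weave, cilium, kube-router, ...
```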

Vagrantfile vendored
View File

@@ -21,11 +21,10 @@ SUPPORTED_OS = {
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
"fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
}
# Defaults for config options defined in CONFIG
@@ -181,17 +180,9 @@ Vagrant.configure("2") do |config|
"flannel_interface": "eth1",
"kube_network_plugin": $network_plugin,
"kube_network_plugin_multus": $multi_networking,
"download_run_once": "True",
"download_localhost": "False",
"download_cache_dir": ENV['HOME'] + "/kubespray_cache",
# Make kubespray cache even when download_run_once is false
"download_force_cache": "True",
# Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
"download_keep_remote_cache": "False",
"docker_keepcache": "1",
# These two settings will put kubectl and admin.config in $inventory/artifacts
"kubeconfig_localhost": "True",
"kubectl_localhost": "True",
"download_run_once": "False",
"download_localhost": "False",
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user]

View File

@@ -4,8 +4,6 @@ ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore
host_key_checking=False
gathering = smart

View File

@@ -19,14 +19,27 @@
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
roles:
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: false
pre_tasks:
- name: gather facts from all instances
setup:
delegate_to: "{{item}}"
delegate_facts: true
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
run_once: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
@@ -39,23 +52,13 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: not etcd_kubeadm_enabled| default(false)
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when: not etcd_kubeadm_enabled| default(false)
- { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -79,12 +82,6 @@
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}
- hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
@@ -101,6 +98,12 @@
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:

View File

@@ -42,11 +42,8 @@ class SearchEC2Tags(object):
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
cluster_name = os.getenv('CLUSTER_NAME')
if cluster_name:
filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
instances = ec2.instances.filter(Filters=filters)
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
##Suppose default vpc_visibility is private

View File

@@ -4,11 +4,8 @@
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd
- name: Set vm_list
set_fact:
- set_fact:
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -8,22 +8,9 @@
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd
- name: Set VM IP, roles lists and load balancer public IP
set_fact:
- set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
lb_pubip: "{{ lb_pubip_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"

View File

@@ -1,8 +0,0 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
address: {{ lb_pubip.ipAddress }}
port: 6443
## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false

View File

@@ -29,7 +29,7 @@ sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
imageReference:
publisher: "OpenLogic"
offer: "CentOS"
sku: "7.5"
sku: "7.2"
version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"

View File

@@ -1,18 +1,10 @@
---
- name: Set base_dir
set_fact:
- set_fact:
base_dir: "{{playbook_dir}}/.generated/"
- name: Create base_dir
file:
path: "{{ base_dir }}"
state: directory
recurse: true
- file: path={{base_dir}} state=directory recurse=true
- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
- template: src={{item}} dest="{{base_dir}}/{{item}}"
with_items:
- network.json
- storage.json

View File

@@ -59,7 +59,6 @@ def get_var_as_bool(name, default):
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
@@ -97,10 +96,9 @@ class KubesprayInventory(object):
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_kube_master(list(self.hosts.keys())[
etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
self.set_kube_master(list(self.hosts.keys())[etcd_hosts_count:5])
else:
self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
self.set_kube_master(list(self.hosts.keys())[:2])
self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])

View File

@@ -2,11 +2,9 @@
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don't run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
## Install
```
Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/group_vars/k8s-cluster/addons.yml and making any adjustments as required.
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
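A minimal sketch of such an override, assuming the `addons.yml` location suggested above (the values shown repeat the shipped defaults for illustration):
```
# inventory/mycluster/group_vars/k8s-cluster/addons.yml (assumed location)
metallb:
  ip_range: "10.5.0.50-10.5.0.99"   # addresses MetalLB may assign
  protocol: "layer2"
```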

View File

@@ -1,12 +1,6 @@
---
metallb:
ip_range: "10.5.0.50-10.5.0.99"
protocol: "layer2"
# additional_address_pools:
# kube_service_pool:
# ip_range: "10.5.1.50-10.5.1.99"
# protocol: "layer2"
# auto_assign: false
limits:
cpu: "100m"
memory: "100Mi"

View File

@@ -8,14 +8,6 @@ data:
config: |
address-pools:
- name: loadbalanced
protocol: {{ metallb.protocol }}
protocol: layer2
addresses:
- {{ metallb.ip_range }}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
- name: {{ pool }}
protocol: {{ metallb.additional_address_pools[pool].protocol }}
addresses:
- {{ metallb.additional_address_pools[pool].ip_range }}
auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}
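For illustration only (not a file in this diff): with one entry under `additional_address_pools`, as in the commented defaults above, the removed template branch would render roughly:
```
address-pools:
- name: loadbalanced
  protocol: layer2
  addresses:
  - 10.5.0.50-10.5.0.99
- name: kube_service_pool
  protocol: layer2
  addresses:
  - 10.5.1.50-10.5.1.99
  auto-assign: false
```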

View File

@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@@ -1,8 +1,6 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

View File

@@ -14,5 +14,3 @@ ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contr
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system.

View File

@@ -4,7 +4,6 @@
register: "initial_heketi_state"
changed_when: false
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
- name: "Bootstrap heketi."
when:
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -17,20 +16,15 @@
register: "initial_heketi_pod"
command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
changed_when: false
- name: "Ensure heketi bootstrap pod is up."
assert:
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
- name: Store the initial heketi pod name
set_fact:
- set_fact:
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
- name: "Test heketi topology."
changed_when: false
register: "heketi_topology"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Load heketi topology."
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
include_tasks: "bootstrap/topology.yml"
@@ -48,7 +42,6 @@
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
changed_when: false
register: "heketi_storage_state"
# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
include_tasks: "bootstrap/storage.yml"

View File

@@ -1,19 +1,11 @@
---
- name: Get storage nodes
register: "label_present"
- register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
- name: "Assign storage label"
when: "label_present.stdout_lines|length == 0"
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
- name: Get storage nodes again
register: "label_present"
- register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
- name: Ensure the label has been set
assert:
that: "label_present|length > 0"
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }

View File

@@ -1,24 +1,19 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
become: true
template:
src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json"
template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi"
kube:
name: "GlusterFS"
kubectl: "{{bin_dir}}/kubectl"
filename: "{{ kube_config_dir }}/heketi-deployment.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Ensure heketi is up and running."
changed_when: false
register: "heketi_state"
vars:
heketi_state:
stdout: "{}"
heketi_state: { stdout: "{}" }
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -27,7 +22,5 @@
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
retries: 60
delay: 5
- name: Set the Heketi pod name
set_fact:
- set_fact:
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"

View File

@@ -1,44 +1,31 @@
---
- name: Get clusterrolebindings
register: "clusterrolebinding_state"
- register: "clusterrolebinding_state"
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
changed_when: false
- name: "Kubernetes Apps | Deploy cluster role binding."
when: "clusterrolebinding_state.stdout == \"\""
command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
- name: Get clusterrolebindings again
register: "clusterrolebinding_state"
- register: "clusterrolebinding_state"
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
changed_when: false
- name: Make sure that clusterrolebindings are present now
assert:
- assert:
that: "clusterrolebinding_state.stdout != \"\""
msg: "Cluster role binding is not present."
- name: Get the heketi-config-secret secret
register: "secret_state"
- register: "secret_state"
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
changed_when: false
- name: "Render Heketi secret configuration."
become: true
template:
src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json"
- name: "Deploy Heketi config secret"
when: "secret_state.stdout == \"\""
command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
- name: Get the heketi-config-secret secret again
register: "secret_state"
- register: "secret_state"
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
changed_when: false
- name: Make sure the heketi-config-secret secret exists now
assert:
- assert:
that: "secret_state.stdout != \"\""
msg: "Heketi config secret is not present."

View File

@@ -56,7 +56,7 @@
"serviceAccountName": "heketi-service-account",
"containers": [
{
"image": "heketi/heketi:9",
"image": "heketi/heketi:7",
"imagePullPolicy": "Always",
"name": "deploy-heketi",
"env": [

View File

@@ -68,7 +68,7 @@
"serviceAccountName": "heketi-service-account",
"containers": [
{
"image": "heketi/heketi:9",
"image": "heketi/heketi:7",
"imagePullPolicy": "Always",
"name": "heketi",
"env": [

View File

@@ -1,2 +0,0 @@
---
heketi_remove_lvm: false

View File

@@ -14,8 +14,6 @@
when: "ansible_os_family == 'Debian'"
- name: "Get volume group information."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
register: "volume_groups"
@@ -23,16 +21,12 @@
changed_when: false
- name: "Remove volume groups."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
command: "vgremove {{ volume_group }} --yes"
with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" }
- name: "Remove physical volume from cluster disks."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
command: "pvremove {{ disk_volume_device_1 }} --yes"
ignore_errors: true
@@ -42,11 +36,11 @@
yum:
name: "lvm2"
state: "absent"
when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
when: "ansible_os_family == 'RedHat'"
- name: "Remove lvm utils (Debian)"
become: true
apt:
name: "lvm2"
state: "absent"
when: "ansible_os_family == 'Debian' and heketi_remove_lvm"
when: "ansible_os_family == 'Debian'"

View File

@@ -1,5 +1,4 @@
.terraform
*.tfvars
!sample-inventory\/cluster.tfvars
*.tfstate
*.tfstate.backup

View File

@@ -16,13 +16,14 @@ most modern installs of OpenStack that support the basic services.
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/): requires setting the variable `wait_for_floatingip = "true"` in your cluster.tfvars
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)
### Known incompatible public clouds
- T-Systems / Open Telekom Cloud: requires `wait_until_associated`
## Approach
The terraform configuration inspects variables found in
@@ -69,7 +70,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
@@ -219,7 +220,7 @@ set OS_PROJECT_DOMAIN_NAME=Default
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|Variable | Description |
|---------|-------------|
@@ -245,7 +246,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
#### Terraform state files
@@ -276,7 +276,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```
if you chose to create a bastion host, this script will create
@@ -290,7 +290,7 @@ pick it up automatically.
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -325,30 +325,6 @@ $ ssh-add ~/.ssh/id_rsa
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
#### Metadata variables
The [python script](../terraform.py) that reads the
generated `.tfstate` file to generate a dynamic inventory recognizes
some variables within a "metadata" block, defined in a "resource"
block (example):
```
resource "openstack_compute_instance_v2" "example" {
...
metadata {
ssh_user = "ubuntu"
prefer_ipv6 = true
python_bin = "/usr/bin/python3"
}
...
}
```
As the example shows, these let you define the SSH username for
Ansible, a Python binary which is needed by Ansible if
`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
instance should be preferred over IPv4.
#### Bastion host
Bastion access will be determined by:
@@ -415,11 +391,6 @@ kube_network_plugin: flannel
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
- Set max amount of attached cinder volume per host (default 256)
```
node_volume_attach_limit: 26
```
### Deploy Kubernetes

View File

@@ -3,7 +3,7 @@ provider "openstack" {
}
module "network" {
source = "./modules/network"
source = "modules/network"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
@@ -14,7 +14,7 @@ module "network" {
}
module "ips" {
source = "./modules/ips"
source = "modules/ips"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
@@ -27,7 +27,7 @@ module "ips" {
}
module "compute" {
source = "./modules/compute"
source = "modules/compute"
cluster_name = "${var.cluster_name}"
az_list = "${var.az_list}"
@@ -63,7 +63,6 @@ module "compute" {
supplementary_master_groups = "${var.supplementary_master_groups}"
supplementary_node_groups = "${var.supplementary_node_groups}"
worker_allowed_ports = "${var.worker_allowed_ports}"
wait_for_floatingip = "${var.wait_for_floatingip}"
network_id = "${module.network.router_id}"
}

View File

@@ -22,20 +22,20 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
resource "openstack_networking_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
count = "${var.number_of_bastions != "" ? 1 : 0}"
count = "${var.number_of_bastions ? 1 : 0}"
description = "${var.cluster_name} - Bastion Server"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "bastion" {
count = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
count = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
}
resource "openstack_networking_secgroup_v2" "k8s" {
@@ -99,7 +99,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
"${openstack_networking_secgroup_v2.bastion.name}",
]
metadata = {
@@ -109,7 +109,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -136,7 +136,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -163,7 +163,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -257,7 +257,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -288,14 +288,12 @@ resource "openstack_compute_floatingip_associate_v2" "bastion" {
count = "${var.number_of_bastions}"
floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
count = "${var.number_of_k8s_masters}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
@@ -308,7 +306,6 @@ resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
count = "${var.number_of_k8s_nodes}"
floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {

View File

@@ -82,8 +82,6 @@ variable "k8s_allowed_egress_ips" {
type = "list"
}
variable "wait_for_floatingip" {}
variable "supplementary_master_groups" {
default = ""
}

View File

@@ -1,5 +1,5 @@
resource "null_resource" "dummy_dependency" {
triggers = {
triggers {
dependency_id = "${var.router_id}"
}
}

View File

@@ -1,15 +1,15 @@
output "k8s_master_fips" {
value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
}
output "k8s_master_no_etcd_fips" {
value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_master_no_etcd.*.address}"]
}
output "k8s_node_fips" {
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
}
output "bastion_fips" {
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
}

View File

@@ -14,7 +14,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
count = "${var.use_neutron}"
network_id = "${openstack_networking_network_v2.k8s[count.index].id}"
network_id = "${openstack_networking_network_v2.k8s.id}"
cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
@@ -22,6 +22,6 @@ resource "openstack_networking_subnet_v2" "k8s" {
resource "openstack_networking_router_interface_v2" "k8s" {
count = "${var.use_neutron}"
router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
router_id = "${openstack_networking_router_v2.k8s.id}"
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
}

View File

@@ -125,11 +125,6 @@ variable "floatingip_pool" {
default = "external"
}
variable "wait_for_floatingip" {
description = "Terraform will poll the instance until the floating IP has been associated."
default = "false"
}
variable "external_net" {
description = "uuid of the external/public network"
}

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
#
# Copyright 2015 Cisco Systems, Inc.
#
@@ -20,15 +20,15 @@
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
import random
from functools import wraps
import json
import os
import re
VERSION = '0.4.0pre'
VERSION = '0.3.0pre'
def tfstates(root=None):
@@ -38,58 +38,15 @@ def tfstates(root=None):
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def convert_to_v3_structure(attributes, prefix=''):
""" Convert the attributes from v4 to v3
Receives a dict and returns a dictionary """
result = {}
if isinstance(attributes, str):
# In the case when we receive a string (e.g. values for security_groups)
return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes}
for key, value in attributes.items():
if isinstance(value, list):
if len(value):
result['{}{}.#'.format(prefix, key, hash)] = len(value)
for i, v in enumerate(value):
result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
elif isinstance(value, dict):
result['{}{}.%'.format(prefix, key)] = len(value)
for k, v in value.items():
result['{}{}.{}'.format(prefix, key, k)] = v
else:
result['{}{}'.format(prefix, key)] = value
return result
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
tf_version = state['version']
if tf_version == 3:
for module in state['modules']:
name = module['path'][-1]
for key, resource in module['resources'].items():
yield name, key, resource
elif tf_version == 4:
# In version 4 the structure changes so we need to iterate
# each instance inside the resource branch.
for resource in state['resources']:
name = resource['module'].split('.')[-1]
for instance in resource['instances']:
key = "{}.{}".format(resource['type'], resource['name'])
if 'index_key' in instance:
key = "{}.{}".format(key, instance['index_key'])
data = {}
data['type'] = resource['type']
data['provider'] = resource['provider']
data['depends_on'] = instance.get('depends_on', [])
data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
if 'id' in instance['attributes']:
data['primary']['id'] = instance['attributes']['id']
data['primary']['meta'] = instance['attributes'].get('meta',{})
yield name, key, data
else:
raise KeyError('tfstate version %d not supported' % tf_version)
## READ RESOURCES
PARSERS = {}
@@ -152,7 +109,7 @@ def calculate_mantl_vars(func):
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in list(source.items()):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
@@ -170,7 +127,7 @@ def parse_attr_list(source, prefix, sep='.'):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return list(attrs.values())
return attrs.values()
def parse_dict(source, prefix, sep='.'):
@@ -282,12 +239,6 @@ def openstack_host(resource, module_name):
attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
try:
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
attrs.update({
'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
'publicly_routable': True,
})
else:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
@@ -301,9 +252,9 @@ def openstack_host(resource, module_name):
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
device_index = 1
for key, value in list(raw_attrs.items()):
for key, value in raw_attrs.items():
match = re.search("^volume.*.device$", key)
if match:
attrs['disk_volume_device_'+str(device_index)] = value
@@ -321,7 +272,7 @@ def openstack_host(resource, module_name):
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in list(attrs['metadata'].items()))
for item in attrs['metadata'].items())
groups.append('os_region=' + attrs['region'])
# groups specific to Mantl

View File

@@ -32,7 +32,7 @@ The name of the resource group your instances are in, can be retrieved via `azur
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`
#### azure\_subnet\_name
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`
#### azure\_security\_group\_name
The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
@@ -40,14 +40,14 @@ The name of the network security group your instances are in, can be retrieved v
#### azure\_aad\_client\_id + azure\_aad\_client\_secret
These will have to be generated first:
- Create an Azure AD Application with:
`azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
display name, identifier-uri, homepage and the password can be chosen
`azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
The name, identifier-uri, home-page and the password can be chosen
Note the AppId in the output.
- Create Service principal for the application with:
`azure ad sp create --id AppId`
`azure ad sp create --applicationId AppId`
This is the AppId from the last command
- Create the role assignment with:
`azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.

View File

@@ -119,13 +119,13 @@ recommended here:
You need to edit your inventory and add:
* `calico-rr` group with nodes in it. `calico-rr` can be combined with
`kube-node` and/or `kube-master`. `calico-rr` group also must be a child
group of `k8s-cluster` group.
* `calico-rr` group with nodes in it. At the moment it's incompatible with
`kube-node` due to BGP port conflict with `calico-node` container. So you
should not have nodes in both `calico-rr` and `kube-node` groups.
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with standalone route reflectors:
Here's an example of Kubespray inventory with route reflectors:
```
[all]
@@ -154,7 +154,6 @@ node5
[k8s-cluster:children]
kube-node
kube-master
calico-rr
[calico-rr]
rr0

View File

@@ -114,12 +114,10 @@ The only exception is that ``hostNetwork: true`` PODs and non-k8s managed contai
cluster service names.
## Nodelocal DNS cache
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns / core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
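As a minimal sketch, assuming the standard sample group vars layout, the cache can be toggled (and its link-local bind address overridden) like this:
```yaml
# inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml (illustrative values)
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
```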
Limitations
-----------
@@ -131,7 +129,9 @@ Limitations
* There is
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
for the SkyDNS ``ndots`` param.
for the SkyDNS ``ndots`` param via an
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
add-on, while SkyDNS supports it though.
* the ``searchdomains`` have a limitation of 6 names and 256 chars
length. Due to default ``svc, default.svc`` subdomains, the actual

View File

@@ -3,22 +3,23 @@ Downloading binaries and containers
Kubespray supports several download/upload modes. The default is:
* Each node downloads binaries and container images on its own, which is ``download_run_once: False``.
* Each node downloads binaries and container images on its own, which is
``download_run_once: False``.
* For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
* For system managed containers, like kubelet or etcd, pull policy is ``download_always_pull: False``, which is pull if only the wanted repo and tag/sha256 digest differs from that the host has.
* For system managed containers, like kubelet or etcd, pull policy is
``download_always_pull: False``, which is pull if only the wanted repo and
tag/sha256 digest differs from that the host has.
There is also a "pull once, push many" mode as well:
* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. Using this requires that docker is installed and running on the ansible master, and that the current user is either in the docker group or can do passwordless sudo, in order to access docker.
NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
On caching:
* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is as much as is needed for the largest image, which is currently slightly less than 150MB.
* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the remote node to the local cache, or use that cache to pre-provision those nodes. To force the use of the cache, set `download_force_cache` to `True`.
* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting download_keep_remote_cache will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.
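Putting those together, a minimal sketch of the caching-related vars named above (values shown are illustrative, not recommendations):
```yaml
download_run_once: true            # download once, then push to the other nodes
download_localhost: false          # set to true to make localhost the download delegate
download_cache_dir: /tmp/kubespray_cache
download_force_cache: true         # use the local cache even when download_run_once is false
download_keep_remote_cache: false  # set to true to keep cached images on the remote nodes
```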
* Override the ``download_run_once: True`` to download container images only once
then push to cluster nodes in batches. The default delegate node
for pushing images is the first `kube-master`.
* If your ansible runner node (aka the admin node) has password-less sudo and
  docker enabled, you may want to define the ``download_localhost: True``, which
  makes that node a delegate for pushing images while running the deployment with
  ansible. This may be the case if cluster nodes cannot access each other via ssh
  or you want to use local docker images as a cache for multiple clusters.
Container images and binary files are described by the vars like ``foo_version``,
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
@@ -28,14 +29,15 @@ Container images may be defined by its repo and tag, for example:
`andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest:
`andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.
Note, the SHA256 digest and the image tag must be both specified and correspond
Note, the sha256 digest and the image tag must be both specified and correspond
to each other. The given example above is represented by the following vars:
```yaml
```
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
dnsmasq_image_repo: andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
```
The full list of available vars may be found in the download's ansible role defaults. Those also allow specifying custom urls and local repositories for binaries and container
The full list of available vars may be found in the download's ansible role defaults.
Those also allow specifying custom urls and local repositories for binaries and container
images as well. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those urls and repos.
@@ -44,7 +46,7 @@ so the hosts can resolve those urls and repos.
In case your servers don't have access to the internet (for example when deploying on premises with security constraints), you'll first have to set up the appropriate proxies/caches/mirrors and/or internal repositories and registries, and then adapt the following variables to fit your environment before deploying:
* At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
NOTE: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
* `pyrepo_index` (and optionally `pyrepo_cert`)
* Depending on the `container_manager`
* When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
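For instance, a hedged sketch of such overrides, reusing the `foo_*` placeholders from above together with a hypothetical internal mirror and registry:
```yaml
# Hypothetical internal mirror/registry; adapt names and URLs to your environment
foo_download_url: "https://mirror.example.internal/foo/v1.2.3/foo"
foo_image_repo: "registry.example.internal:5000/foo"
docker_insecure_registries:
  - registry.example.internal:5000
```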

View File

@@ -51,27 +51,20 @@ You may want to add worker, master or etcd nodes to your existing cluster. This
Remove nodes
------------
You may want to remove **master**, **worker**, or **etcd** nodes from your
existing cluster. This can be done by re-running the `remove-node.yml`
playbook. First, all specified nodes will be drained, then stop some
kubernetes services and delete some certificates,
and finally execute the kubectl command to delete these nodes.
This can be combined with the add node function. This is generally helpful
when doing something like autoscaling your clusters. Of course, if a node
is not working, you can remove the node and install it again.
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function. This is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.
Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
--private-key=~/.ssh/private_key
Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
```
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
--private-key=~/.ssh/private_key \
--extra-vars "node=nodename,nodename2"
```
If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
to skip the node reset step. If one node is unavailable, but others you wish
to remove are able to connect via SSH, you could set reset_nodes=no as a host
var in inventory.
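As an illustration, such a host var entry could look like this in a YAML inventory (the node name is hypothetical):
```yaml
all:
  hosts:
    node5:
      reset_nodes: false   # skip the reset step for this unreachable node
```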
Connecting to Kubernetes
------------------------

View File

@@ -1,48 +0,0 @@
Kube-OVN
===========
Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn)
## How to use it
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: kube-ovn
...
```
## Verifying kube-ovn install
Kube-OVN runs ovn and its controller in the `kube-ovn` namespace
* Check the status of kube-ovn pods
```
# From the CLI
kubectl get pod -n kube-ovn
# Output
NAME READY STATUS RESTARTS AGE
kube-ovn-cni-49lsm 1/1 Running 0 2d20h
kube-ovn-cni-9db8f 1/1 Running 0 2d20h
kube-ovn-cni-wftdk 1/1 Running 0 2d20h
kube-ovn-controller-68d7bb48bd-7tnvg 1/1 Running 0 2d21h
ovn-central-6675dbb7d9-d7z8m 1/1 Running 0 4d16h
ovs-ovn-hqn8p 1/1 Running 0 4d16h
ovs-ovn-hvpl8 1/1 Running 0 4d16h
ovs-ovn-r5frh 1/1 Running 0 4d16h
```
* Check the default and node subnet
```
# From the CLI
kubectl get subnet
# Output
NAME PROTOCOL CIDR PRIVATE NAT
join IPv4 100.64.0.0/16 false false
ovn-default IPv4 10.16.0.0/16 false true
```

View File

@@ -1,48 +0,0 @@
Macvlan
===============
How to use it:
-------------
* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: macvlan
...
```
* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
```
all:
hosts:
node1:
ip: 10.2.2.1
access_ip: 10.2.2.1
ansible_host: 10.2.2.1
macvlan_interface: ens5
```
Issues encountered:
-------------
- Service DNS
reply from unexpected source:
add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
- Disable nodelocaldns
The nodelocal dns IP is not reachable.
Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
```
enable_nodelocaldns: false
```

View File

@@ -13,7 +13,6 @@ Kubespray's roadmap
- [ ] GCE
- [x] AWS (contrib/terraform/aws)
- [x] Openstack (contrib/terraform/openstack)
- [x] Packet
- [ ] Digital Ocean
- [ ] Azure
- [ ] On AWS autoscaling, multi AZ
@@ -24,11 +23,11 @@ Kubespray's roadmap
https://github.com/kubernetes/kubernetes/issues/18112)
### Tests
- [x] Run kubernetes e2e tests
- [ ] Run kubernetes e2e tests
- [ ] Test idempotency on single OS but for all network plugins/container engines
- [ ] single test on AWS per day
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
- [x] Reorganize CI test vars into group var files
- [ ] Reorganize CI test vars into group var files
### Lifecycle
- [ ] Upgrade granularity: select components to upgrade and skip others
@@ -43,10 +42,23 @@ Kubespray's roadmap
- Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
### Addons (helm or native ansible)
- [x] Helm
- [x] Ingress-nginx
- [x] kubernetes-dashboard
Include optionals deployments to init the cluster:
##### Monitoring
- Heapster / Grafana ....
- **Prometheus**
##### Others
##### Dashboards:
- kubernetes-dashboard
- Fabric8
- Tectonic
- Cockpit
##### Paas like
- Openshift Origin
- Openstack
- Deis Workflow
### Others
- Organize and update documentation (split in categories)

View File

@@ -1,129 +1,69 @@
Introduction
============
Vagrant Install
=================
Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (the latter is untested), you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install kubernetes on them. Once they are completed you can connect to any of them by running `vagrant ssh k8s-[1..3]`.
Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
with vmware, but is untested) you should be able to launch a 3 node
Kubernetes cluster by simply running `$ vagrant up`.<br />
To give an estimate of the expected duration of a provisioning run: On a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time.
This will spin up 3 VMs and install kubernetes on them. Once they are
completed you can connect to any of them by running <br />
`$ vagrant ssh k8s-0[1..3]`.
For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tmpfs devices that are mounted are mostly empty, and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well.
```
$ vagrant up
Bringing machine 'k8s-01' up with 'virtualbox' provider...
Bringing machine 'k8s-02' up with 'virtualbox' provider...
Bringing machine 'k8s-03' up with 'virtualbox' provider...
==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
...
...
k8s-03: Running ansible-playbook...
PLAY [k8s-cluster] *************************************************************
TASK [setup] *******************************************************************
ok: [k8s-03]
ok: [k8s-01]
ok: [k8s-02]
...
...
PLAY RECAP *********************************************************************
k8s-01 : ok=157 changed=66 unreachable=0 failed=0
k8s-02 : ok=137 changed=59 unreachable=0 failed=0
k8s-03 : ok=86 changed=51 unreachable=0 failed=0
$ vagrant ssh k8s-01
vagrant@k8s-01:~$ kubectl get nodes
NAME STATUS AGE
k8s-01 Ready 45s
k8s-02 Ready 45s
k8s-03 Ready 45s
```
Customize Vagrant
=================
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it. An example of how to configure this file is given below.
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
or through an override file.
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
e.g.:
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
Use alternative OS for Vagrant
==============================
By default, Vagrant uses Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster.
By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
operating system for your local cluster.
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
echo '$os = "coreos-stable"' >> vagrant/config.rb
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
File and image caching
======================
Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes and, as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see the [download documentation](docs/downloads.md).
Example use of Vagrant
======================
The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repository and run it by executing `source <name_of_the_file>`.
```
# use virtualenv to install all python requirements
VENVDIR=venv
virtualenv --python=/usr/bin/python3.7 $VENVDIR
source $VENVDIR/bin/activate
pip install -r requirements.txt
# prepare an inventory to test with
INV=inventory/my_lab
rm -rf ${INV}.bak &> /dev/null
mv ${INV} ${INV}.bak &> /dev/null
cp -a inventory/sample ${INV}
rm -f ${INV}/hosts.ini
# customize the vagrant environment
mkdir vagrant
cat << EOF > vagrant/config.rb
\$instance_name_prefix = "kub"
\$vm_cpus = 1
\$num_instances = 3
\$os = "centos-bento"
\$subnet = "10.0.20"
\$network_plugin = "flannel"
\$inventory = "$INV"
\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
EOF
# make the rpm cache
mkdir -p temp/docker_rpms
vagrant up
# make a copy of the downloaded docker rpm, to speed up the next provisioning run
scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/
# copy kubectl access configuration in place
mkdir $HOME/.kube/ &> /dev/null
ln -s $INV/artifacts/admin.conf $HOME/.kube/config
# make the kubectl binary available
sudo ln -s $INV/artifacts/kubectl /usr/local/bin/kubectl
#or
export PATH=$PATH:$INV/artifacts
```
If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible:
```
ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
```
If all went well, you can check that it's all working as expected:
```
kubectl get nodes
```
The output should look like this:
```
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
kub-1 Ready master 32m v1.14.1
kub-2 Ready master 31m v1.14.1
kub-3 Ready <none> 31m v1.14.1
```
Another nice test is the following:
```
kubectl get po --all-namespaces -o wide
```
Which should yield something like the following:
```
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-97c4b444f-9wm86 1/1 Running 0 31m 10.233.66.2 kub-3 <none> <none>
kube-system coredns-97c4b444f-g7hqx 0/1 Pending 0 30m <none> <none> <none> <none>
kube-system dns-autoscaler-5fc5fdbf6-5c48k 1/1 Running 0 31m 10.233.66.3 kub-3 <none> <none>
kube-system kube-apiserver-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-apiserver-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kube-controller-manager-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-controller-manager-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kube-flannel-8tgcn 2/2 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system kube-flannel-b2hgt 2/2 Running 0 31m 10.0.20.101 kub-1 <none> <none>
kube-system kube-flannel-zx4bc 2/2 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system kube-proxy-4bjdn 1/1 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system kube-proxy-l5tt5 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system kube-proxy-x59q8 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none>
kube-system kube-scheduler-kub-1 1/1 Running 0 32m 10.0.20.101 kub-1 <none> <none>
kube-system kube-scheduler-kub-2 1/1 Running 0 32m 10.0.20.102 kub-2 <none> <none>
kube-system kubernetes-dashboard-6c7466966c-jqz42 1/1 Running 0 31m 10.233.66.4 kub-3 <none> <none>
kube-system nginx-proxy-kub-3 1/1 Running 0 32m 10.0.20.103 kub-3 <none> <none>
kube-system nodelocaldns-2x7vh 1/1 Running 0 31m 10.0.20.102 kub-2 <none> <none>
kube-system nodelocaldns-fpvnz 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system nodelocaldns-h2f42 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none>
```
Create clusteradmin rbac and get the login token for the dashboard:
```
kubectl create -f contrib/misc/clusteradmin-rbac.yml
kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$'
```
Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login).

View File

@@ -57,16 +57,10 @@ following default cluster parameters:
10.233.0.0/18). Must not overlap with kube_pods_subnet
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
overlap with kube_service_addresses.
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remainin
bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster.
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
on the CoreDNS service.
* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
(default is k8s_external.local)
* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin.
on the CoreDNS service.
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
@@ -104,7 +98,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
@@ -124,13 +117,11 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
from the kube-apiserver when the certificate expiration approaches.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can be defined either as a dict or a comma-separated labels string:
*node_labels* must be defined as a dict:
```
node_labels:
label1_name: label1_value
label2_name: label2_value
node_labels: "label1_name=label1_value,label2_name=label2_value"
```
* *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter.
For example, taints can be set in the inventory as variables or more widely in group_vars.
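A hedged sketch of such an entry, using the `key=value:effect` string format that the kubelet flag expects (the key and value here are illustrative):
```yaml
node_taints:
  - "node.example.com/external=true:NoSchedule"
```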

View File

@@ -27,6 +27,12 @@
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:

View File

@@ -12,4 +12,3 @@ node1
[k8s-cluster:children]
kube-node
kube-master
calico-rr

View File

@@ -2,9 +2,6 @@
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd
## Experimental kubeadm etcd deployment mode. Available only for new deployment
etcd_kubeadm_enabled: false
## Directory where the binaries will be installed
bin_dir: /usr/local/bin

View File

@@ -35,8 +35,6 @@ local_volume_provisioner_enabled: false
# local-storage:
# host_dir: /mnt/disks
# mount_dir: /mnt/disks
# volume_mode: Filesystem
# fs_type: ext4
# fast-disks:
# host_dir: /mnt/fast-disks
# mount_dir: /mnt/fast-disks
@@ -80,9 +78,8 @@ rbd_provisioner_enabled: false
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
# beta.kubernetes.io/os: "linux"
# beta.kubernetes.io/os: "linux": ""
# ingress_nginx_tolerations:
# - key: "node-role.kubernetes.io/master"
# operator: "Equal"
@@ -97,7 +94,7 @@ ingress_publish_status_address: ""
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/coredns:53"
# 53: "kube-system/kube-dns:53"
# Cert manager deployment
cert_manager_enabled: false

View File

@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.15.11
kube_version: v1.14.3
# kubernetes image repo definition
kube_image_repo: "gcr.io/google-containers"
@@ -134,12 +134,6 @@ dns_mode: coredns
# Enable nodelocal dns cache
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
@@ -151,7 +145,7 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
dns_domain: "{{ cluster_name }}"
## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## docker for docker and crio for cri-o.
container_manager: docker
## Settings for containerized control plane (etcd/kubelet/secrets)
@@ -193,18 +187,6 @@ podsecuritypolicy_enabled: false
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512M
# system_cpu_reserved: 500m
## Reservation for master hosts
# system_master_memory_reserved: 256M
# system_master_cpu_reserved: 250m
# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

View File

@@ -1,6 +0,0 @@
---
# private interface, on a l2-network
macvlan_interface: "eth1"
# Enable nat in default gateway network interface
enable_nat_default_gateway: true

View File

@@ -28,9 +28,6 @@
# node5
# node6
[calico-rr]
[k8s-cluster:children]
kube-master
kube-node
calico-rr

View File

@@ -1,7 +1,6 @@
---
- hosts: localhost
become: no
gather_facts: no
tasks:
- name: "Check ansible version >=2.7.8"
assert:
@@ -13,8 +12,12 @@
vars:
ansible_connection: local
- hosts: all
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
gather_facts: no
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -28,20 +31,14 @@
when: delete_nodes_confirmation != "yes"
- hosts: kube-master
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: remove-node/pre-remove, tags: pre-remove }
- hosts: "{{ node | default('kube-node') }}"
gather_facts: no
roles:
- { role: kubespray-defaults }
- { role: reset, tags: reset, when: reset_nodes|default(True) }
- { role: reset, tags: reset }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
gather_facts: no
- hosts: kube-master
roles:
- { role: kubespray-defaults }
- { role: remove-node/post-remove, tags: post-remove }

View File

@@ -1,4 +1,4 @@
ansible==2.7.12
ansible==2.7.8
jinja2==2.10.1
netaddr==0.7.19
pbr==5.2.0

View File

@@ -1,13 +1,11 @@
---
- name: set bastion host IP
set_fact:
- set_fact:
bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
delegate_to: localhost
# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
- name: Store the current ansible_user in the real_user fact
set_fact:
- set_fact:
real_user: "{{ ansible_user }}"
- name: create ssh bastion conf

View File

@@ -15,4 +15,4 @@ Host {{ bastion_ip }}
ControlPersist 5m
Host {{ vars['hosts'] }}
ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
ProxyCommand ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}

View File

@@ -23,6 +23,7 @@ Variables are listed with their default values, if applicable.
* `http_proxy`/`https_proxy`
The role will configure the package manager (if applicable) to download packages via a proxy.
This is currently implemented for CentOS/RHEL (`http_proxy` only) as well as Debian and Ubuntu (both `http_proxy` and `https_proxy` are respected)
* `override_system_hostname: true`
The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).
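As an illustration, these variables might be set in group vars like so (the proxy URL is hypothetical):
```yaml
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
override_system_hostname: true
```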

View File

@@ -19,9 +19,6 @@
- name: Run bootstrap.sh
script: bootstrap.sh
become: true
environment:
http_proxy: "{{ http_proxy | default('') }}"
https_proxy: "{{ https_proxy | default('') }}"
when:
- need_bootstrap.rc != 0

View File

@@ -12,8 +12,8 @@
tags:
- facts
- name: Check http::proxy in apt configuration files
raw: apt-config dump | grep -qsi 'Acquire::http::proxy'
- name: Check http::proxy in /etc/apt/apt.conf
raw: grep -qsi 'Acquire::http::proxy' /etc/apt/apt.conf
register: need_http_proxy
failed_when: false
changed_when: false
@@ -31,8 +31,8 @@
- http_proxy is defined
- need_http_proxy.rc != 0
- name: Check https::proxy in apt configuration files
raw: apt-config dump | grep -qsi 'Acquire::https::proxy'
- name: Check https::proxy in /etc/apt/apt.conf
raw: grep -qsi 'Acquire::https::proxy' /etc/apt/apt.conf
register: need_https_proxy
failed_when: false
changed_when: false

View File

@@ -25,26 +25,6 @@
tags:
- facts
- name: Check if a proxy is set in /etc/dnf/dnf.conf
raw: grep -qs 'proxy=' /etc/dnf/dnf.conf
register: need_http_proxy
failed_when: false
changed_when: false
# This command should always run, even in check mode
check_mode: false
environment: {}
when:
- http_proxy is defined
- name: Add http_proxy to /etc/dnf/dnf.conf if http_proxy is defined
raw: echo 'proxy={{ http_proxy }}' >> /etc/dnf/dnf.conf
become: true
environment: {}
when:
- http_proxy is defined
- need_http_proxy.rc != 0
- not is_atomic
# Fedora's policy as of Fedora 30 is to still install python2 as /usr/bin/python
# See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3 for the current status
- name: Install python on fedora

View File

@@ -1,33 +1,6 @@
---
# OpenSUSE ships with Python installed
- name: Set the http_proxy in /etc/sysconfig/proxy
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^HTTP_PROXY='
line: 'HTTP_PROXY="{{ http_proxy }}"'
become: true
when:
- http_proxy is defined
- name: Set the https_proxy in /etc/sysconfig/proxy
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^HTTPS_PROXY='
line: 'HTTPS_PROXY="{{ https_proxy }}"'
become: true
when:
- https_proxy is defined
- name: Enable proxies
lineinfile:
path: /etc/sysconfig/proxy
regexp: '^PROXY_ENABLED='
line: 'PROXY_ENABLED="yes"'
become: true
when:
- http_proxy is defined or https_proxy is defined
# Without this package, the get_url module fails when trying to handle https
- name: Install python-cryptography
zypper:

View File

@@ -1,21 +0,0 @@
---
- name: Download Oracle Linux public yum repo
get_url:
url: https://yum.oracle.com/public-yum-ol7.repo
dest: /etc/yum.repos.d/public-yum-ol7.repo
- name: Enable Oracle Linux repo
ini_file:
dest: /etc/yum.repos.d/public-yum-ol7.repo
section: "{{ item }}"
option: enabled
value: "1"
with_items:
- ol7_latest
- ol7_addons
- ol7_developer_EPEL
- name: Install packages requirements for bootstrap
yum:
name: container-selinux
state: present

View File

@@ -25,9 +25,6 @@
- include_tasks: bootstrap-opensuse.yml
when: '"openSUSE" in os_release.stdout'
- include_tasks: bootstrap-oracle.yml
when: '"Oracle" in os_release.stdout'
- name: Create remote_tmp for it is used by another module
file:
path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}"
@@ -72,11 +69,3 @@
- ceph-common
state: present
when: rbd_provisioner_enabled|default(false)
- name: Ensure bash_completion.d folder exists
file:
name: /etc/bash_completion.d/
state: directory
owner: root
group: root
mode: 0755

View File

@@ -1,44 +0,0 @@
---
kubelet_cgroup_driver: systemd
containerd_config:
grpc:
max_recv_message_size: 16777216
max_send_message_size: 16777216
debug:
level: ""
registries:
"docker.io": "https://registry-1.docker.io"
max_container_log_line_size: -1
containerd_version: '1.2.6'
containerd_package: 'containerd.io'
containerd_cfg_dir: /etc/containerd
# Path to runc binary
runc_binary: /usr/sbin/runc
yum_repo_dir: /etc/yum.repos.d
yum_conf: /etc/yum.conf
containerd_yum_conf: /etc/yum_containerd.conf
# Optional values for containerd apt repo
containerd_package_info:
pkgs:
containerd_repo_key_info:
repo_keys:
containerd_repo_info:
repos:
extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"
# Ubuntu docker-ce repo
containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
containerd_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
containerd_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
containerd_ubuntu_repo_component: 'stable'

View File

@@ -1,20 +0,0 @@
---
- name: restart containerd
command: /bin/true
notify:
- Containerd | restart containerd
- Containerd | wait for containerd
- name: Containerd | restart containerd
systemd:
name: containerd
state: restarted
enabled: yes
daemon-reload: yes
- name: Containerd | wait for containerd
command: "{{ containerd_bin_dir }}/ctr images ls -q"
register: containerd_ready
retries: 8
delay: 4
until: containerd_ready.rc == 0

View File

@@ -1,85 +0,0 @@
---
- name: ensure containerd repository public key is installed
action: "{{ containerd_repo_key_info.pkg_key }}"
args:
id: "{{ item }}"
url: "{{ containerd_repo_key_info.url }}"
state: present
register: keyserver_task_result
until: keyserver_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
with_items: "{{ containerd_repo_key_info.repo_keys }}"
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: ensure containerd repository is enabled
action: "{{ containerd_repo_info.pkg_repo }}"
args:
repo: "{{ item }}"
state: present
with_items: "{{ containerd_repo_info.repos }}"
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- containerd_repo_info.repos|length > 0
# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_containerd.j2"
dest: "/etc/apt/preferences.d/containerd"
owner: "root"
mode: 0644
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: Configure containerd repository on Fedora
template:
src: "fedora_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
when: ansible_distribution == "Fedora" and not is_atomic
- name: Configure containerd repository on RedHat/CentOS
template:
src: "rh_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: check if container-selinux is available
yum:
list: "container-selinux"
register: yum_result
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
yum_repository:
name: extras
description: "CentOS-7 - Extras"
state: present
baseurl: "{{ extras_rh_repo_base_url }}"
file: "extras"
gpgcheck: yes
gpgkey: "{{ extras_rh_repo_gpgkey }}"
keepcache: "{{ containerd_rpm_keepcache | default('1') }}"
proxy: " {{ http_proxy | default('_none_') }}"
when:
- ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- yum_result.results | length == 0
- name: Copy yum.conf for editing
copy:
src: "{{ yum_conf }}"
dest: "{{ containerd_yum_conf }}"
remote_src: yes
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Edit copy of yum.conf to set obsoletes=0
lineinfile:
path: "{{ containerd_yum_conf }}"
state: present
regexp: '^obsoletes='
line: 'obsoletes=0'
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

View File

@@ -1,27 +0,0 @@
---
- name: crictl | Download crictl
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.crictl) }}"
- name: Install crictl config
template:
src: ../templates/crictl.yaml.j2
dest: /etc/crictl.yaml
owner: bin
mode: 0644
- name: Copy crictl binary from download dir
synchronize:
src: "{{ local_release_dir }}/crictl"
dest: "{{ bin_dir }}/crictl"
compress: no
perms: yes
owner: no
group: no
delegate_to: "{{ inventory_hostname }}"
- name: Install crictl completion
shell: "{{ bin_dir }}/crictl completion >/etc/bash_completion.d/crictl"
ignore_errors: True
when: ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]

View File

@@ -1,95 +0,0 @@
---
- name: Fail containerd setup if distribution is not supported
fail:
msg: "{{ ansible_distribution }} is not supported by containerd."
when:
- not ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
tags:
- facts
- include_tasks: containerd_repo.yml
- name: ensure containerd config directory
file:
dest: "{{ containerd_cfg_dir }}"
state: directory
mode: 0755
owner: root
group: root
- name: Copy containerd config file
template:
src: config.toml.j2
dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root"
mode: 0644
notify: restart containerd
# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_containerd.j2"
dest: "/etc/apt/preferences.d/containerd"
owner: "root"
mode: 0644
when:
- ansible_os_family in ['Ubuntu', 'Debian']
- not is_atomic
- name: ensure containerd packages are installed
action: "{{ containerd_package_info.pkg_mgr }}"
args:
pkg: "{{ item.name }}"
force: "{{ item.force | default(omit) }}"
conf_file: "{{ item.yum_conf | default(omit) }}"
state: present
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
register: containerd_task_result
until: containerd_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
with_items: "{{ containerd_package_info.pkgs }}"
notify: restart containerd
when:
- not is_atomic
- containerd_package_info.pkgs|length > 0
ignore_errors: true
- name: Check if runc is installed
stat:
path: "{{ runc_binary }}"
register: runc_stat
- name: Install runc package if necessary
action: "{{ containerd_package_info.pkg_mgr }}"
args:
pkg: runc
state: present
update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
register: runc_task_result
until: runc_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
notify: restart containerd
when:
- not is_atomic
- not runc_stat.stat.exists
- include_tasks: crictl.yml

View File

@@ -1,3 +0,0 @@
Package: {{ containerd_package }}
Pin: version {{ containerd_version }}*
Pin-Priority: 1001

View File

@@ -1,40 +0,0 @@
# Kubernetes doesn't use containerd restart manager.
disabled_plugins = ["restart"]
[debug]
level = "{{ containerd_config.debug.level | default("") }}"
{% if 'grpc' in containerd_config %}
[grpc]
{% for param, value in containerd_config.grpc.items() %}
{{ param }} = {{ value }}
{% endfor %}
{% endif %}
[plugins.linux]
shim = "/usr/bin/containerd-shim"
runtime = "{{ runc_binary }}"
[plugins.cri]
stream_server_address = "127.0.0.1"
max_container_log_line_size = {{ containerd_config.max_container_log_line_size }}
sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
{% if 'registries' in containerd_config %}
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
{% for registry, addr in containerd_config.registries.items() %}
[plugins.cri.registry.mirrors."{{ registry }}"]
endpoint = ["{{ addr }}"]
{% endfor %}
{% endif %}

View File

@@ -1,4 +0,0 @@
runtime-endpoint: unix://{{ cri_socket }}
image-endpoint: unix://{{ cri_socket }}
timeout: 30
debug: false

View File

@@ -1,9 +0,0 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}

View File

@@ -1,9 +0,0 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}

View File

@@ -1,28 +0,0 @@
---
containerd_versioned_pkg:
'latest': "{{ containerd_package }}"
'1.2.4': "{{ containerd_package }}-1.2.4-3.1.el7"
'1.2.5': "{{ containerd_package }}-1.2.5-3.1.el7"
'1.2.6': "{{ containerd_package }}-1.2.6-3.3.el7"
'stable': "{{ containerd_package }}-1.2.6-3.3.el7"
'edge': "{{ containerd_package }}-1.2.6-3.3.el7"
containerd_package_info:
pkg_mgr: yum
pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
containerd_pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
yum_conf: "{{ containerd_yum_conf }}"
containerd_repo_key_info:
pkg_key: ''
repo_keys: []
containerd_repo_info:
pkg_repo: ''
repos: []
runc_binary: /bin/runc

View File

@@ -1,17 +0,0 @@
---
# docker-ce containerd.io does not contain daemon
containerd_package: containerd
containerd_package_info:
pkg_mgr: zypper
pkgs:
- name: "{{ containerd_package }}"
state: latest
containerd_repo_key_info:
pkg_key: ''
repo_keys: []
containerd_repo_info:
pkg_repo: ''
repos: []

View File

@@ -1,31 +0,0 @@
---
containerd_versioned_pkg:
'latest': "{{ containerd_package }}"
'1.2.4': "{{ containerd_package }}=1.2.4-1"
'1.2.5': "{{ containerd_package }}=1.2.5-1"
'1.2.6': "{{ containerd_package }}=1.2.6-3"
'stable': "{{ containerd_package }}=1.2.4-1"
'edge': "{{ containerd_package }}=1.2.4-1"
containerd_package_info:
pkg_mgr: apt
pkgs:
- name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
force: false
containerd_repo_key_info:
pkg_key: apt_key
url: '{{ containerd_ubuntu_repo_gpgkey }}'
repo_keys:
- '{{ containerd_ubuntu_repo_repokey }}'
containerd_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ containerd_ubuntu_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ containerd_ubuntu_repo_component }}
runc_binary: /usr/bin/runc

View File

@@ -1,2 +1,2 @@
---
crio_rhel_repo_base_url: 'http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin311/'
crio_rhel_repo_base_url: 'https://cbs.centos.org/repos/paas7-openshift-origin311-candidate/x86_64/os/'

View File

@@ -22,13 +22,7 @@
description: OpenShift Origin Repo
baseurl: "{{ crio_rhel_repo_base_url }}"
gpgcheck: no
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
- name: Add CRI-O PPA
apt_repository:
repo: ppa:projectatomic/ppa
state: present
when: ansible_distribution in ["Ubuntu"]
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Make sure needed folders exist in the system
with_items:

View File

@@ -64,7 +64,7 @@ file_locking = true
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
{% if ansible_os_family == "ClearLinux" or ansible_os_family == "RedHat" or ansible_distribution == "Ubuntu" %}
{% if ansible_os_family == "ClearLinux" %}
runtime = "/usr/bin/runc"
{% else %}
runtime = "/usr/sbin/runc"
@@ -96,7 +96,7 @@ default_workload_trust = "trusted"
no_pivot = false
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "{{ crio_conmon }}"
conmon = "/usr/libexec/crio/conmon"
# conmon_env is the environment variable list for conmon process,
# used for passing necessary environment variable to conmon or runtime.

View File

@@ -3,4 +3,3 @@ crio_packages:
- containers-basic
crio_service: crio
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -4,4 +4,3 @@ crio_packages:
- cri-tools
crio_service: cri-o
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -5,4 +5,3 @@ crio_packages:
- oci-systemd-hook
crio_service: crio
crio_conmon: /usr/libexec/crio/conmon

View File

@@ -1,6 +0,0 @@
---
crio_packages:
- "cri-o-{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
crio_service: crio
crio_conmon: /usr/lib/crio/bin/conmon

View File

@@ -38,6 +38,11 @@ docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
# Debian docker-ce repo
docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
# dockerproject repo
dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
docker_bin_dir: "/usr/bin"
# CentOS/RedHat Extras repo
extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"

View File

@@ -1,8 +0,0 @@
---
- name: Install Docker plugin
command: docker plugin install --grant-all-permissions {{ docker_plugin | quote }}
when: docker_plugin is defined
register: docker_plugin_status
failed_when:
- docker_plugin_status.failed
- '"already exists" not in docker_plugin_status.stderr'

View File

@@ -4,8 +4,7 @@
path: /run/ostree-booted
register: ostree
- name: set is_atomic
set_fact:
- set_fact:
is_atomic: "{{ ostree.stat.exists }}"
- name: gather os specific variables
@@ -27,6 +26,9 @@
tags:
- facts
# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
# openSUSE version so we can't use it. The only alternative is to use the docker
# packages from the distribution repositories.
- name: Warn about Docker version on SUSE
debug:
msg: "SUSE distributions always install Docker from the distro repos"
@@ -101,16 +103,11 @@
dest: "{{ yum_repo_dir }}/docker.repo"
when: ansible_distribution == "Fedora" and not is_atomic
- name: Configure docker repository on RedHat/CentOS/Oracle Linux
yum_repository:
name: docker-ce
baseurl: "{{ docker_rh_repo_base_url }}"
description: "Docker CE Stable - $basearch"
gpgcheck: yes
gpgkey: "{{ docker_rh_repo_gpgkey }}"
keepcache: "{{ docker_rpm_keepcache | default('1') }}"
proxy: " {{ http_proxy | default('_none_') }}"
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
- name: Configure docker repository on RedHat/CentOS
template:
src: "rh_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker.repo"
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: check if container-selinux is available
yum:
@@ -138,7 +135,7 @@
src: "{{ yum_conf }}"
dest: "{{ docker_yum_conf }}"
remote_src: yes
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: Edit copy of yum.conf to set obsoletes=0
lineinfile:
@@ -146,7 +143,7 @@
state: present
regexp: '^obsoletes='
line: 'obsoletes=0'
when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
- name: ensure docker packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
@@ -194,11 +191,13 @@
- ansible_distribution == 'Ubuntu'
# This is required to ensure any apt upgrade will not break kubernetes
- name: Tell Debian hosts not to change the docker version with apt upgrade
dpkg_selections:
name: docker-ce
selection: hold
when: ansible_os_family in ["Debian"]
- name: Set docker pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_docker.j2"
dest: "/etc/apt/preferences.d/docker"
owner: "root"
mode: 0644
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "ClearLinux", "RedHat", "Suse"] or is_atomic)
- name: ensure docker started, remove our config if docker start failed and try again
block:
@@ -237,12 +236,6 @@
resolvconf_mode == 'docker_dns' and
installed_docker_version.stdout is version('1.12', '<')
# Install each plugin using a looped include to make error handling in the included task simpler.
- include_tasks: docker_plugin.yml
loop: "{{ docker_plugins }}"
loop_control:
loop_var: docker_plugin
- name: Set docker systemd config
import_tasks: systemd.yml

View File

@@ -1,12 +1,4 @@
---
- name: Remove legacy docker repo file
file:
path: "{{ yum_repo_dir }}/docker.repo"
state: absent
when:
- ansible_distribution in ["CentOS","RedHat","OracleLinux"]
- not is_atomic
- name: Ensure old versions of Docker are not installed. | Debian
apt:
name: '{{ docker_remove_packages_apt }}'

View File

@@ -0,0 +1,3 @@
Package: docker-ce
Pin: version {{ docker_version }}.*
Pin-Priority: 1001

View File

@@ -1,15 +1,5 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }} \
{% for i in docker_insecure_registries %}--insecure-registry={{ i }} {% endfor %} \
{% for i in docker_registry_mirrors %}--registry-mirror={{ i }} {% endfor %} \
{% if docker_version != "latest" and docker_version is version('17.05', '<') %}--graph={% else %}--data-root={% endif %}{{ docker_daemon_graph }} \
{% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %} \
{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %} \
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd \
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false \
{% endif %}"
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}

View File

@@ -0,0 +1,17 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
[docker-engine]
name=Docker-Engine Repository
baseurl={{ dockerproject_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ dockerproject_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}

View File

@@ -2,6 +2,7 @@
docker_kernel_min_version: '3.10'
# https://download.docker.com/linux/debian/
# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
docker_versioned_pkg:
'latest': docker-ce
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
@@ -11,9 +12,9 @@ docker_versioned_pkg:
'17.12': docker-ce=17.12.1~ce-0~debian
'18.03': docker-ce=18.03.1~ce-0~debian
'18.06': docker-ce=18.06.2~ce~3-0~debian
'18.09': docker-ce=5:18.09.7~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:18.09.7~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:18.09.7~3-0~debian-{{ ansible_distribution_release|lower }}
'18.09': docker-ce=5:18.09.5~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:18.09.5~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:18.09.5~3-0~debian-{{ ansible_distribution_release|lower }}
docker_package_info:
pkg_mgr: apt
@@ -37,7 +38,7 @@ docker_repo_info:
dockerproject_repo_key_info:
pkg_key: apt_key
url: '{{ docker_debian_repo_gpgkey }}'
url: '{{ dockerproject_apt_repo_gpgkey }}'
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
@@ -45,6 +46,6 @@ dockerproject_repo_info:
pkg_repo: apt_repository
repos:
- >
deb {{ docker_debian_repo_base_url }}
deb {{ dockerproject_apt_repo_base_url }}
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main

Some files were not shown because too many files have changed in this diff.