Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 22:04:43 +03:00)

Compare commits: 37 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 72a0d78b3c |  |
|  | 13d08af054 |  |
|  | 80a7ae9845 |  |
|  | 6c30a7b2eb |  |
|  | 76b72338da |  |
|  | a39e78d42d |  |
|  | 4550dccb84 |  |
|  | 01ce09f343 |  |
|  | 71dca67ca2 |  |
|  | 327f9baccf |  |
|  | a98b866a66 |  |
|  | 3aabba7535 |  |
|  | c22cfa255b |  |
|  | af211b3d71 |  |
|  | 6bb3463e7c |  |
|  | 8b151d12b9 |  |
|  | ecb6dc3679 |  |
|  | 49a223a17d |  |
|  | e5cfdc648c |  |
|  | 9f9f70aade |  |
|  | e91c04f586 |  |
|  | 277fa6c12d |  |
|  | ca3050ec3d |  |
|  | 1b3ced152b |  |
|  | 97031f9133 |  |
|  | c92506e2e7 |  |
|  | 65a9772adf |  |
|  | 1e07ee6cc4 |  |
|  | 01a130273f |  |
|  | 3c710219a1 |  |
|  | 2ba285a544 |  |
|  | 668d02846d |  |
|  | 48edf1757b |  |
|  | db121049b3 |  |
|  | 8058cdbc0e |  |
|  | 31d357284a |  |
|  | 4ee77ce026 |  |
.gitignore (vendored): 79 lines changed

@@ -8,12 +8,85 @@ temp
.tox
.cache
*.bak
*.egg-info
*.pyc
*.pyo
*.tfstate
*.tfstate.backup
**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
vagrant/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# dotenv
.env

# virtualenv
venv/
ENV/
@@ -18,10 +18,7 @@ variables:
# us-west1-a

before_script:
- pip install ansible==2.3.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- pip install -r tests/requirements.txt
- mkdir -p /.ssh
- cp tests/ansible.cfg .

@@ -59,7 +56,7 @@ before_script:
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "host"
VAULT_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"

@@ -75,10 +72,7 @@ before_script:
- $HOME/.cache
before_script:
- docker info
- pip install ansible==2.3.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- pip install -r tests/requirements.txt
- mkdir -p /.ssh
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa

@@ -110,7 +104,7 @@ before_script:
# Check out latest tag if testing upgrade
# Uncomment when gitlab kargo repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- test "${UPGRADE_TEST}" != "false" && git checkout acae0fe4a36bd1d3cd267e72ad01126a72d1458a
- test "${UPGRADE_TEST}" != "false" && git checkout 72ae7638bcc94c66afa8620dfa4ad9a9249327ea

# Create cluster

@@ -266,8 +260,9 @@ before_script:
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
CLOUD_REGION: us-west1-b
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

@@ -279,7 +274,6 @@ before_script:
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: ha
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""

@@ -297,6 +291,7 @@ before_script:
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: centos-7
CLOUD_REGION: us-west1-a
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: default
STARTUP_SCRIPT: ""

@@ -311,7 +306,7 @@ before_script:
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: coreos-stable
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos

@@ -350,7 +345,7 @@ before_script:
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: coreos-alpha-1325-0-0-v20170216
CLOUD_IMAGE: coreos-alpha-1506-0-0-v20170817
CLOUD_REGION: us-west1-a
CLUSTER_MODE: ha-scale
BOOTSTRAP_OS: coreos

@@ -367,15 +362,14 @@ before_script:
KUBELET_DEPLOYMENT: rkt
STARTUP_SCRIPT: ""

#Note(mattymo): Vault deployment is broken and needs work
#.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
## stage: deploy-gce-part1
# KUBE_NETWORK_PLUGIN: canal
# CERT_MGMT: vault
# CLOUD_IMAGE: ubuntu-1604-xenial
# CLOUD_REGION: us-central1-b
# CLUSTER_MODE: separate
# STARTUP_SCRIPT: ""
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
CERT_MGMT: vault
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""

.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
# stage: deploy-gce-special

@@ -600,17 +594,16 @@ ubuntu-rkt-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]

#Note(mattymo): Vault deployment is broken (https://github.com/kubernetes-incubator/kubespray/issues/1545)
#ubuntu-vault-sep:
# stage: deploy-gce-part1
# <<: *job
# <<: *gce
# variables:
# <<: *gce_variables
# <<: *ubuntu_vault_sep_variables
# when: manual
# except: ['triggers']
# only: ['master', /^pr-.*$/]
ubuntu-vault-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_vault_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

ubuntu-flannel-rbac-sep:
stage: deploy-gce-special

@@ -643,6 +636,13 @@ syntax-check:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
except: ['triggers', 'master']

yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
except: ['triggers', 'master']

tox-inventory-builder:
stage: unit-tests
<<: *job
.yamllint (new file): 16 lines

@@ -0,0 +1,16 @@
---
extends: default

rules:
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 1
  indentation:
    spaces: 2
    indent-sequences: consistent
  line-length: disable
  new-line-at-end-of-file: disable
  truthy: disable
@@ -53,13 +53,13 @@ Versions of supported components
--------------------------------

[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
[weave](http://weave.works/) v2.0.1 <br>
[docker](https://www.docker.com/) v1.13.1 (see note)<br>
[docker](https://www.docker.com/) v1.13 (see note)<br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>

Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
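The note above suggests pinning the Docker package so automatic updates cannot move it past the supported version. A minimal Ansible sketch of such a pin is shown below; the Debian-family condition and the `docker-engine` package name are assumptions (adjust for your distro and repository), and the yum versionlock plugin is the RHEL/CentOS equivalent:

```yaml
# Hypothetical pin: hold the Docker package so apt upgrades cannot change it.
- name: Hold the installed Docker version
  dpkg_selections:
    name: docker-engine      # assumed package name; may be docker.io or docker-ce on your hosts
    selection: hold
  when: ansible_os_family == "Debian"
```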
@@ -10,3 +10,4 @@ fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles
@@ -9,10 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi

if [ $(az &>/dev/null) ] ; then
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
elif [ $(azure &>/dev/null) ] ; then
elif azure &>/dev/null; then
ansible-playbook generate-templates.yml

azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
@@ -9,7 +9,7 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi

if [ $(az &>/dev/null) ] ; then
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
@@ -9,9 +9,9 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
# check if azure cli 2.0 exists else use azure cli 1.0
if [ $(az &>/dev/null) ] ; then
if az &>/dev/null; then
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
elif [ $(azure &>/dev/null) ]; then
elif azure &>/dev/null; then
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
else
echo "Azure cli not found"
contrib/packaging/rpm/ansible-kubespray.spec (new file): 60 lines

@@ -0,0 +1,60 @@
%global srcname ansible_kubespray

%{!?upstream_version: %global upstream_version %{version}%{?milestone}}

Name: ansible-kubespray
Version: XXX
Release: XXX
Summary: Ansible modules for installing Kubernetes

Group: System Environment/Libraries
License: ASL 2.0
Vendor: Kubespray <smainklh@gmail.com>
Url: https://github.com/kubernetes-incubator/kubespray
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz

BuildArch: noarch
BuildRequires: git
BuildRequires: python2-devel
BuildRequires: python-setuptools
BuildRequires: python-d2to1
BuildRequires: python-pbr

Requires: ansible
Requires: python-jinja2
Requires: python-netaddr

%description

Ansible-kubespray is a set of Ansible modules and playbooks for
installing a Kubernetes cluster. If you have questions, join us
on the https://slack.k8s.io, channel '#kubespray'.

%prep
%autosetup -n %{name}-%{upstream_version} -S git


%build
%{__python2} setup.py build


%install
export PBR_VERSION=%{version}
export SKIP_PIP_INSTALL=1
%{__python2} setup.py install --skip-build --root %{buildroot}


%files
%doc README.md
%doc inventory/inventory.example
%config /etc/kubespray/ansible.cfg
%config /etc/kubespray/inventory/group_vars/all.yml
%config /etc/kubespray/inventory/group_vars/k8s-cluster.yml
%license LICENSE
%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info
/usr/local/share/kubespray/roles/
/usr/local/share/kubespray/playbooks/
%defattr(-,root,root)


%changelog
@@ -161,3 +161,11 @@ Cloud providers configuration
=============================

Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.

##### Optional : Ignore kernel's RPF check setting

By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:

```
calico_node_ignorelooserpf: true
```
@@ -23,13 +23,6 @@ ip a show dev flannel.1
valid_lft forever preferred_lft forever
```

* Docker must be configured with a bridge ip in the flannel subnet.

```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```

* Try to run a container and check its ip address

```
@@ -67,6 +67,8 @@ following default cluster paramters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
alpha/experimental Kubernetes features. (defaults is `[]`)
* *authorization_modes* - A list of [authorization mode](
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).

@@ -98,6 +100,11 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
``--insecure-registry=myregistry.mydomain:5000``
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
proxy
* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
is unlikely to work on newer releases. Starting with Kubernetes v1.7
series, this now defaults to ``host``. Before v1.7, the default was Docker.
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
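Pulling the variables documented above together, a hypothetical group_vars override could look like the sketch below; every value here is illustrative only and assumed, not a recommendation from this changeset:

```yaml
# Illustrative overrides for the variables described above (hypothetical values).
kubelet_deployment_type: host      # host, rkt, or docker; docker mode is unreliable on newer releases
kube_feature_gates:
  - "Accelerators=true"            # example key=value feature gate (assumed)
authorization_modes:
  - "RBAC"
kubelet_load_modules: true         # allow kubelet to load kernel modules, e.g. for rbd-backed volumes
http_proxy: "http://proxy.example.local:3128"   # hypothetical proxy for deployments behind a proxy
```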
@@ -74,6 +74,14 @@ bin_dir: /usr/local/bin
#azure_vnet_name:
#azure_route_table_name:

## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

## Set these proxy values in order to update docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
@@ -23,7 +23,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: false

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.6.7
kube_version: v1.7.3

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)

@@ -141,7 +141,7 @@ docker_bin_dir: "/usr/bin"

# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
kubelet_deployment_type: host
cert_management: script
vault_deployment_type: docker
@@ -1,3 +1,4 @@
pbr>=1.6
ansible>=2.3.0
netaddr
jinja2>=2.9.6
@@ -16,6 +16,6 @@ Host {{ bastion_ip }}
ControlPersist 5m

Host {{ vars['hosts'] }}
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
StrictHostKeyChecking no
{% endif %}
@@ -49,4 +49,3 @@
pip:
name: "{{ item }}"
with_items: "{{pip_python_modules}}"
@@ -27,4 +27,3 @@
hostname:
name: "{{inventory_hostname}}"
when: ansible_hostname == 'localhost'
@@ -6,4 +6,3 @@
regexp: '^\w+\s+requiretty'
dest: /etc/sudoers
state: absent
@@ -86,4 +86,3 @@
port: 53
timeout: 180
when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
@@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");

@@ -30,6 +31,9 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1

@@ -47,4 +51,3 @@ spec:
- --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
- --logtostderr=true
- --v={{ kube_log_level }}
@@ -21,6 +21,9 @@ spec:
kubernetes.io/cluster-service: "true"
kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: dnsmasq
image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"

@@ -35,7 +38,6 @@ spec:
capabilities:
add:
- NET_ADMIN
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: {{ dns_cpu_limit }}

@@ -64,4 +66,3 @@ spec:
hostPath:
path: /etc/dnsmasq.d-available
dnsPolicy: Default # Don't use cluster DNS.
@@ -1,3 +1,4 @@
---
docker_version: '1.13'

docker_package_info:
@@ -1,3 +1,3 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options | default('') }} \
--iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}"
--iptables=false"
@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '3.10'

# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'

# versioning: docker-io itself is pinned at docker 1.5
@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'

# https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package
@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'

# https://yum.dockerproject.org/repo/main/centos/7/Packages/
@@ -18,15 +18,17 @@ download_localhost: False
download_always_pull: False

# Versions
kube_version: v1.6.7
kube_version: v1.7.3
etcd_version: v3.2.4
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v1.1.3"
calico_cni_version: "v1.8.0"
calico_policy_version: "v0.5.4"
calico_version: "v2.5.0"
calico_ctl_version: "v1.5.0"
calico_cni_version: "v1.10.0"
calico_policy_version: "v0.7.0"
weave_version: 2.0.1
flannel_version: v0.8.0
flannel_version: "v0.8.0"
flannel_cni_version: "v0.2.0"
pod_infra_version: 3.0

# Download URL's

@@ -42,13 +44,15 @@ etcd_image_repo: "quay.io/coreos/etcd"
etcd_image_tag: "{{ etcd_version }}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
calicoctl_image_repo: "calico/ctl"
calicoctl_image_tag: "{{ calico_version }}"
calico_node_image_repo: "calico/node"
flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
flannel_cni_image_tag: "{{ flannel_cni_version }}"
calicoctl_image_repo: "quay.io/calico/ctl"
calicoctl_image_tag: "{{ calico_ctl_version }}"
calico_node_image_repo: "quay.io/calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "calico/cni"
calico_cni_image_repo: "quay.io/calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_repo: "quay.io/calico/kube-policy-controller"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "v0.3.0"

@@ -137,6 +141,12 @@ downloads:
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
flannel_cni:
container: true
repo: "{{ flannel_cni_image_repo }}"
tag: "{{ flannel_cni_image_tag }}"
sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
enabled: "{{ kube_network_plugin == 'flannel' }}"
calicoctl:
container: true
repo: "{{ calicoctl_image_repo }}"
@@ -25,4 +25,4 @@ etcd_memory_limit: 512M

etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"

etcd_compaction_retention: "0"
etcd_compaction_retention: "8"
@@ -43,4 +43,3 @@
ETCDCTL_API: 3
retries: 3
delay: "{{ retry_stagger | random + 3 }}"
@@ -30,4 +30,3 @@
- name: set etcd_secret_changed
set_fact:
etcd_secret_changed: true
@@ -66,4 +66,3 @@
{%- set _ = certs.update({'sync': True}) -%}
{% endif %}
{{ certs.sync }}
@@ -76,8 +76,7 @@
'admin-{{ inventory_hostname }}.pem',
'admin-{{ inventory_hostname }}-key.pem',
'member-{{ inventory_hostname }}.pem',
'member-{{ inventory_hostname }}-key.pem'
]
'member-{{ inventory_hostname }}-key.pem']
all_node_certs: "['ca.pem',
{% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
'node-{{ node }}.pem',
@@ -7,7 +7,6 @@
when: inventory_hostname in etcd_node_cert_hosts
tags: etcd-secrets


- name: gen_certs_vault | Read in the local credentials
command: cat /etc/vault/roles/etcd/userpass
register: etcd_vault_creds_cat

@@ -58,6 +57,9 @@
[
{%- for host in groups.etcd -%}
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
{%- if hostvars[host]['ip'] is defined -%}
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
"127.0.0.1","::1"
]

@@ -81,6 +83,9 @@
[
{%- for host in etcd_node_cert_hosts -%}
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
{%- if hostvars[host]['ip'] is defined -%}
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
"127.0.0.1","::1"
]

@@ -90,5 +95,3 @@
with_items: "{{ etcd_node_certs_needed|d([]) }}"
when: inventory_hostname in etcd_node_cert_hosts
notify: set etcd_secret_changed
@@ -1,3 +1,4 @@
---
- name: "Pre-upgrade | check for etcd-proxy unit file"
stat:
path: /etc/systemd/system/etcd-proxy.service
@@ -1,7 +1,7 @@
---
- name: Refresh config | Create etcd config file
template:
src: etcd.env.yml
src: etcd.env.j2
dest: /etc/etcd.env
notify: restart etcd
when: is_etcd_master
@@ -1,7 +1,6 @@
---

elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org'
elrepo_rpm : elrepo-release-7.0-2.el7.elrepo.noarch.rpm
elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm
elrepo_mirror: http://www.elrepo.org

elrepo_url: '{{elrepo_mirror}}/{{elrepo_rpm}}'
@@ -1,3 +1,4 @@
---
# Versions
kubedns_version: 1.14.2
kubednsautoscaler_version: 1.1.1
@@ -14,12 +14,12 @@
|
||||
dest: "{{kube_config_dir}}/{{item.file}}"
|
||||
with_items:
|
||||
- {name: kubedns, file: kubedns-sa.yml, type: sa}
|
||||
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
|
||||
- {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment}
|
||||
- {name: kubedns, file: kubedns-svc.yml, type: svc}
|
||||
- {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
|
||||
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
|
||||
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
|
||||
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
|
||||
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment}
|
||||
register: manifests
|
||||
when:
|
||||
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Lay Down Netchecker Template
|
||||
template:
|
||||
src: "{{item.file}}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# Copyright 2016 The Kubernetes Authors. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# Copyright 2016 The Kubernetes Authors. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# Copyright 2016 The Kubernetes Authors. All rights reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -28,11 +29,15 @@ spec:
|
||||
k8s-app: kubedns-autoscaler
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
|
||||
spec:
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: CriticalAddonsOnly
|
||||
operator: exists
|
||||
resources:
|
||||
requests:
|
||||
cpu: "20m"
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@@ -29,6 +30,8 @@ spec:
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -19,4 +20,3 @@ spec:
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
|
||||
|
||||
@@ -12,6 +12,9 @@ spec:
|
||||
labels:
|
||||
app: netchecker-agent
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: netchecker-agent
|
||||
image: "{{ agent_img }}"
|
||||
|
||||
@@ -16,6 +16,9 @@ spec:
|
||||
{% if kube_version | version_compare('v1.6', '>=') %}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
{% endif %}
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: netchecker-agent
|
||||
image: "{{ agent_img }}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.elasticsearch }}"
|
||||
|
||||
@@ -38,4 +38,3 @@
|
||||
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
|
||||
run_once: true
|
||||
when: es_service_manifest.changed
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.fluentd }}"
|
||||
|
||||
@@ -20,4 +20,3 @@
|
||||
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
|
||||
run_once: true
|
||||
when: fluentd_ds_manifest.changed
|
||||
|
||||
|
||||
@@ -17,6 +17,9 @@ spec:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
version: "v{{ fluentd_version }}"
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: fluentd-es
|
||||
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.kibana }}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: kubernetes-apps/efk/elasticsearch
|
||||
- role: kubernetes-apps/efk/fluentd
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
helm_enabled: false
|
||||
|
||||
# specify a dir and attach it to helm for HELM_HOME.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.helm }}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.netcheck_server }}"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create canal ConfigMap
|
||||
run_once: true
|
||||
kube:
|
||||
@@ -7,18 +8,6 @@
|
||||
resource: "configmap"
|
||||
namespace: "{{system_namespace}}"
|
||||
|
||||
#FIXME: remove if kubernetes/features#124 is implemented
|
||||
- name: Purge old flannel and canal-node
|
||||
run_once: true
|
||||
kube:
|
||||
name: "canal-node"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/canal-node.yaml"
|
||||
resource: "ds"
|
||||
namespace: "{{system_namespace}}"
|
||||
state: absent
|
||||
when: inventory_hostname == groups['kube-master'][0] and canal_node_manifest.changed
|
||||
|
||||
- name: Start flannel and calico-node
|
||||
run_once: true
|
||||
kube:
|
||||
@@ -29,4 +18,3 @@
|
||||
namespace: "{{system_namespace}}"
|
||||
state: "{{ item | ternary('latest','present') }}"
|
||||
with_items: "{{ canal_node_manifest.changed }}"
|
||||
|
||||
|
||||
22
roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
Normal file
22
roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding"
|
||||
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml"
|
||||
run_once: true
|
||||
when: rbac_enabled and flannel_rbac_manifest.changed
|
||||
|
||||
- name: Flannel | Start Resources
|
||||
kube:
|
||||
name: "kube-flannel"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/cni-flannel.yml"
|
||||
resource: "ds"
|
||||
namespace: "{{system_namespace}}"
|
||||
state: "{{ item | ternary('latest','present') }}"
|
||||
with_items: "{{ flannel_manifest.changed }}"
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
- name: Flannel | Wait for flannel subnet.env file presence
|
||||
wait_for:
|
||||
path: /run/flannel/subnet.env
|
||||
delay: 5
|
||||
timeout: 600
|
||||
@@ -3,6 +3,9 @@ dependencies:
|
||||
- role: kubernetes-apps/network_plugin/canal
|
||||
when: kube_network_plugin == 'canal'
|
||||
tags: canal
|
||||
- role: kubernetes-apps/network_plugin/flannel
|
||||
when: kube_network_plugin == 'flannel'
|
||||
tags: flannel
|
||||
- role: kubernetes-apps/network_plugin/weave
|
||||
when: kube_network_plugin == 'weave'
|
||||
tags: weave
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# FIXME: remove if kubernetes/features#124 is implemented
|
||||
- name: Weave | Purge old weave daemonset
|
||||
kube:
|
||||
@@ -9,7 +10,6 @@
|
||||
state: absent
|
||||
when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed
|
||||
|
||||
|
||||
- name: Weave | Start Resources
|
||||
kube:
|
||||
name: "weave-net"
|
||||
@@ -21,7 +21,6 @@
|
||||
with_items: "{{ weave_manifest.changed }}"
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
|
||||
- name: "Weave | wait for weave to become available"
|
||||
uri:
|
||||
url: http://127.0.0.1:6784/status
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# Limits for calico apps
|
||||
calico_policy_controller_cpu_limit: 100m
|
||||
calico_policy_controller_memory_limit: 256M
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- set_fact:
|
||||
calico_cert_dir: "{{ canal_cert_dir }}"
|
||||
when: kube_network_plugin == 'canal'
|
||||
|
||||
@@ -21,6 +21,9 @@ spec:
|
||||
k8s-app: calico-policy
|
||||
spec:
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: calico-policy-controller
|
||||
image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
# An experimental dev/test only dynamic volumes provisioner,
|
||||
# for PetSets. Works for kube>=v1.3 only.
|
||||
kube_hostpath_dynamic_provisioner: "false"
|
||||
|
||||
@@ -88,4 +88,3 @@
|
||||
|
||||
- include: post-upgrade.yml
|
||||
tags: k8s-post-upgrade
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
seconds: 10
|
||||
when: needs_etcd_migration|bool
|
||||
|
||||
- name: "Post-upgrade | stop kubelet on all masters"
|
||||
- name: "Post-upgrade | start kubelet on all masters"
|
||||
service:
|
||||
name: kubelet
|
||||
state: started
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
when: kube_apiserver_storage_backend == "etcd3"
|
||||
failed_when: false
|
||||
|
||||
- name: "Pre-upgrade | etcd3 upgrade | use etcd2 unless forced to etc3"
|
||||
- name: "Pre-upgrade | etcd3 upgrade | use etcd2 unless forced to etcd3"
|
||||
set_fact:
|
||||
kube_apiserver_storage_backend: "etcd2"
|
||||
when: old_data_exists.rc == 0 and not force_etcd3|bool
|
||||
|
||||
@@ -84,6 +84,9 @@ spec:
|
||||
{% if authorization_modes %}
|
||||
- --authorization-mode={{ authorization_modes|join(',') }}
|
||||
{% endif %}
|
||||
{% if kube_feature_gates %}
|
||||
- --feature-gates={{ kube_feature_gates|join(',') }}
|
||||
{% endif %}
|
||||
{% if apiserver_custom_flags is string %}
|
||||
- {{ apiserver_custom_flags }}
|
||||
{% else %}
|
||||
|
||||
@@ -45,9 +45,15 @@ spec:
|
||||
- --cloud-provider={{cloud_provider}}
|
||||
{% endif %}
|
||||
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
|
||||
- --allocate-node-cidrs=true
|
||||
- --configure-cloud-routes=true
|
||||
{% endif %}
|
||||
{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel"] %}
|
||||
- --allocate-node-cidrs=true
|
||||
- --cluster-cidr={{ kube_pods_subnet }}
|
||||
- --service-cluster-ip-range={{ kube_service_addresses }}
|
||||
{% endif %}
|
||||
{% if kube_feature_gates %}
|
||||
- --feature-gates={{ kube_feature_gates|join(',') }}
|
||||
{% endif %}
|
||||
{% if controller_mgr_custom_flags is string %}
|
||||
- {{ controller_mgr_custom_flags }}
|
||||
|
||||
@@ -27,6 +27,9 @@ spec:
|
||||
- --leader-elect=true
|
||||
- --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
|
||||
- --v={{ kube_log_level }}
|
||||
{% if kube_feature_gates %}
|
||||
- --feature-gates={{ kube_feature_gates|join(',') }}
|
||||
{% endif %}
|
||||
{% if scheduler_custom_flags is string %}
|
||||
- {{ scheduler_custom_flags }}
|
||||
{% else %}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
# Valid options: docker (default), rkt, or host
|
||||
kubelet_deployment_type: docker
|
||||
kubelet_deployment_type: host
|
||||
|
||||
# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
|
||||
kube_apiserver_insecure_bind_address: 127.0.0.1
|
||||
@@ -15,8 +16,8 @@ kube_proxy_masquerade_all: false
|
||||
|
||||
# These options reflect limitations of running kubelet in a container.
|
||||
# Modify at your own risk
|
||||
kubelet_enable_cri: false
|
||||
kubelet_cgroups_per_qos: false
|
||||
kubelet_enable_cri: true
|
||||
kubelet_cgroups_per_qos: true
|
||||
# Set to empty to avoid cgroup creation
|
||||
kubelet_enforce_node_allocatable: "\"\""
|
||||
|
||||
|
||||
@@ -21,4 +21,3 @@
|
||||
dest: "/etc/systemd/system/kubelet.service"
|
||||
backup: "yes"
|
||||
notify: restart kubelet
|
||||
|
||||
|
||||
@@ -30,4 +30,3 @@
|
||||
dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf
|
||||
when: http_proxy is defined or https_proxy is defined or no_proxy is defined
|
||||
notify: restart kubelet
|
||||
|
||||
|
||||
@@ -4,3 +4,8 @@
|
||||
args:
|
||||
creates: "/var/lib/cni"
|
||||
failed_when: false
|
||||
|
||||
- name: "Pre-upgrade | ensure kubelet container is stopped if using host deployment"
|
||||
command: docker stop kubelet
|
||||
failed_when: false
|
||||
when: kubelet_deployment_type == "host"
|
||||
|
||||
@@ -36,8 +36,14 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
|
||||
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
|
||||
{% if standalone_kubelet|bool %}
|
||||
{# We are on a master-only host. Make the master unschedulable in this case. #}
|
||||
{% if kube_version | version_compare('v1.6', '>=') %}
|
||||
{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
|
||||
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %}
|
||||
{% else %}
|
||||
{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
|
||||
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{# Kubelet node labels #}
|
||||
{% if inventory_hostname in groups['kube-master'] %}
|
||||
@@ -49,14 +55,13 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
|
||||
{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
|
||||
{% endif %}
|
||||
|
||||
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
|
||||
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %}
|
||||
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
|
||||
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave"] %}
|
||||
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
|
||||
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
|
||||
DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
|
||||
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
|
||||
# Please note that --reconcile-cidr is deprecated and a no-op in Kubernetes 1.5 but still required in 1.4
|
||||
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet --reconcile-cidr=true"
|
||||
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
|
||||
{% endif %}
|
||||
# Should this cluster be allowed to run privileged docker containers
|
||||
KUBE_ALLOW_PRIV="--allow-privileged=true"
|
||||
|
||||
@@ -32,7 +32,7 @@ ExecStart=/usr/bin/rkt run \
|
||||
--volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
|
||||
--volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
{% if kube_network_plugin in ["calico", "weave", "canal"] %}
|
||||
{% if kube_network_plugin in ["calico", "weave", "canal", "flannel"] %}
|
||||
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
|
||||
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Preinstall | restart network
|
||||
command: /bin/true
|
||||
notify:
|
||||
|
||||
@@ -48,5 +48,3 @@
|
||||
fail:
|
||||
msg: "azure_route_table_name is missing"
|
||||
when: azure_route_table_name is not defined or azure_route_table_name == ""
|
||||
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@
|
||||
- "/etc/cni/net.d"
|
||||
- "/opt/cni/bin"
|
||||
when:
|
||||
- kube_network_plugin in ["calico", "weave", "canal"]
|
||||
- kube_network_plugin in ["calico", "weave", "canal", "flannel"]
|
||||
- inventory_hostname in groups['k8s-cluster']
|
||||
tags: [network, calico, weave, canal, bootstrap-os]
|
||||
|
||||
@@ -128,6 +128,7 @@
|
||||
when:
|
||||
- ansible_distribution in ["CentOS","RedHat"]
|
||||
- not is_atomic
|
||||
- epel_rpm_download_url != ''
|
||||
register: epel_task_result
|
||||
until: epel_task_result|succeeded
|
||||
retries: 4
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: check vsphere environment variables
|
||||
fail:
|
||||
msg: "{{ item.name }} is missing"
|
||||
|
||||
@@ -7,3 +7,12 @@ tenant-id={{ openstack_tenant_id }}
|
||||
{% if openstack_domain_name is defined and openstack_domain_name != "" %}
|
||||
domain-name={{ openstack_domain_name }}
|
||||
{% endif %}
|
||||
|
||||
{% if openstack_lbaas_enabled and openstack_lbaas_subnet_id %}
|
||||
[LoadBalancer]
|
||||
subnet-id={{ openstack_lbaas_subnet_id }}
|
||||
create-monitor={{ openstack_lbaas_create_monitor }}
|
||||
monitor-delay={{ openstack_lbaas_monitor_delay }}
|
||||
monitor-timeout={{ openstack_lbaas_monitor_timeout }}
|
||||
monitor-max-retries={{ openstack_lbaas_monitor_max_retries }}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
required_pkgs:
|
||||
- libselinux-python
|
||||
- device-mapper-libs
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
required_pkgs:
|
||||
- python-apt
|
||||
- aufs-tools
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
required_pkgs:
|
||||
- libselinux-python
|
||||
- device-mapper-libs
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
required_pkgs:
|
||||
- libselinux-python
|
||||
- device-mapper-libs
|
||||
|
||||
@@ -82,10 +82,13 @@ gen_key_and_cert() {
|
||||
|
||||
# Admins
|
||||
if [ -n "$MASTERS" ]; then
|
||||
# If any host requires new certs, just regenerate all master certs
|
||||
# kube-apiserver
|
||||
# Generate only if we don't have existing ca and apiserver certs
|
||||
if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then
|
||||
gen_key_and_cert "apiserver" "/CN=kube-apiserver"
|
||||
cat ca.pem >> apiserver.pem
|
||||
fi
|
||||
# If any host requires new certs, just regenerate scheduler and controller-manager master certs
|
||||
# kube-scheduler
|
||||
gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler"
|
||||
# kube-controller-manager
|
||||
|
||||
@@ -105,4 +105,3 @@
|
||||
{%- set _ = certs.update({'sync': True}) -%}
|
||||
{% endif %}
|
||||
{{ certs.sync }}
|
||||
|
||||
|
||||
@@ -74,8 +74,7 @@
|
||||
'kube-scheduler.pem',
|
||||
'kube-scheduler-key.pem',
|
||||
'kube-controller-manager.pem',
|
||||
'kube-controller-manager-key.pem',
|
||||
]
|
||||
'kube-controller-manager-key.pem']
|
||||
all_node_certs: "['ca.pem',
|
||||
{% for node in groups['k8s-cluster'] %}
|
||||
'node-{{ node }}.pem',
|
||||
@@ -87,8 +86,7 @@
|
||||
'node-{{ inventory_hostname }}.pem',
|
||||
'node-{{ inventory_hostname }}-key.pem',
|
||||
'kube-proxy-{{ inventory_hostname }}.pem',
|
||||
'kube-proxy-{{ inventory_hostname }}-key.pem',
|
||||
]
|
||||
'kube-proxy-{{ inventory_hostname }}-key.pem']
|
||||
tags: facts
|
||||
|
||||
- name: Gen_certs | Gather master certs
|
||||
@@ -195,4 +193,3 @@
|
||||
- name: Gen_certs | update ca-certificates (RedHat)
|
||||
command: update-ca-trust extract
|
||||
when: kube_ca_cert.changed and ansible_os_family == "RedHat"
|
||||
|
||||
|
||||
@@ -10,16 +10,16 @@
|
||||
- name: gen_certs_vault | Read in the local credentials
|
||||
command: cat /etc/vault/roles/kube/userpass
|
||||
register: kube_vault_creds_cat
|
||||
when: inventory_hostname == groups['k8s-cluster']|first
|
||||
delegate_to: "{{ groups['k8s-cluster'][0] }}"
|
||||
|
||||
- name: gen_certs_vault | Set facts for read Vault Creds
|
||||
set_fact:
|
||||
kube_vault_creds: "{{ hostvars[groups['k8s-cluster']|first]['kube_vault_creds_cat']['stdout'] | from_json }}"
|
||||
when: inventory_hostname == groups['k8s-cluster']|first
|
||||
kube_vault_creds: "{{ kube_vault_creds_cat.stdout|from_json }}"
|
||||
delegate_to: "{{ groups['k8s-cluster'][0] }}"
|
||||
|
||||
- name: gen_certs_vault | Log into Vault and obtain an token
|
||||
uri:
|
||||
url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ kube_vault_creds.username }}"
|
||||
url: "{{ hostvars[groups['vault'][0]]['vault_leader_url'] }}/v1/auth/userpass/login/{{ kube_vault_creds.username }}"
|
||||
headers:
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
@@ -28,14 +28,15 @@
|
||||
body:
|
||||
password: "{{ kube_vault_creds.password }}"
|
||||
register: kube_vault_login_result
|
||||
when: inventory_hostname == groups['k8s-cluster']|first
|
||||
delegate_to: "{{ groups['k8s-cluster'][0] }}"
|
||||
|
||||
- name: gen_certs_vault | Set fact for Vault API token
|
||||
set_fact:
|
||||
kube_vault_headers:
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
X-Vault-Token: "{{ hostvars[groups['k8s-cluster']|first]['kube_vault_login_result']['json']['auth']['client_token'] }}"
|
||||
X-Vault-Token: "{{ kube_vault_login_result.get('json',{}).get('auth', {}).get('client_token') }}"
|
||||
run_once: true
|
||||
|
||||
# Issue certs to kube-master nodes
|
||||
- include: ../../../vault/tasks/shared/issue_cert.yml
|
||||
@@ -67,13 +68,16 @@
|
||||
[
|
||||
{%- for host in groups['kube-master'] -%}
|
||||
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
|
||||
{%- if hostvars[host]['ip'] is defined -%}
|
||||
"{{ hostvars[host]['ip'] }}",
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
|
||||
]
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
with_items: "{{ kube_api_certs_needed|d([]) }}"
|
||||
with_items: "{{ kube_master_components_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['kube-master']
|
||||
|
||||
# Issue node certs to k8s-cluster nodes
|
||||
@@ -89,3 +93,16 @@
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
with_items: "{{ kube_node_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']
|
||||
|
||||
- include: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_headers: "{{ kube_vault_headers }}"
|
||||
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
with_items: "{{ kube_proxy_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']
|
||||
|
||||
@@ -27,24 +27,24 @@
|
||||
|
||||
- include: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "apiserver.pem"
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['kube-master'] }}"
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"]
|
||||
|
||||
- name: sync_kube_master_certs | Set facts for apiserver sync_file results
|
||||
- name: sync_kube_master_certs | Set facts for kube master components sync_file results
|
||||
set_fact:
|
||||
kube_api_certs_needed: "{{ item.path }}"
|
||||
kube_master_components_certs_needed: "{{ kube_master_components_certs_needed|d([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after apiserver cert
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after kube master components cert
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
|
||||
- include: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: ca.pem
|
||||
|
||||
@@ -36,3 +36,27 @@
|
||||
- name: sync_kube_node_certs | Unset sync_file_results after ca.pem
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- name: sync_kube_node_certs | Create list of needed kube-proxy certs
|
||||
set_fact:
|
||||
kube_proxy_cert_list: "{{ kube_proxy_cert_list|default([]) + ['kube-proxy-' + item + '.pem'] }}"
|
||||
with_items: "{{ groups['k8s-cluster'] }}"
|
||||
|
||||
- include: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['k8s-cluster'] }}"
|
||||
sync_file_owner: kube
|
||||
with_items: "{{ kube_proxy_cert_list|default([]) }}"
|
||||
|
||||
- name: sync_kube_node_certs | Set facts for kube-proxy sync_file results
|
||||
set_fact:
|
||||
kube_proxy_certs_needed: "{{ kube_proxy_certs_needed|default([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_node_certs | Unset sync_file_results after kube proxy certs
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.