Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 05:45:06 +03:00)
Compare commits: v2.0.1...test-tag-1 (171 commits)
Commits in this comparison (SHA1):

a222be7fae, 9d43cd86be, 6ed99f1f44, be1e1b41bd, fd30131dc2, b7bf502e02, 3f70e3a843, cae2982d81,
b638c89556, 9bc51bd0e2, 408b4f3f42, d818ac1d59, bd1c764a1a, 8f377ad8bd, 97dabbe997, 5a7a3f6d4a,
b4327fdc99, 10f924a617, 3dd6a01c8b, 585afef945, bdc65990e1, f2e4ffcac2, ae66b6e648, 923057c1a8,
0f6e08d34f, 4889a3e2e1, 39d87a96aa, e7c03ba66a, 08822ec684, 6463a01e04, 0cf1850465, 1418fb394b,
e4eda88ca9, 71a3c97d6f, 1c3d2924ae, a11b9d28bd, b54eb609bf, dc8ff413f9, f8ffa1601d, da01bc1fbb,
a2079a9ca9, a627299468, e5fdc63bdd, fe83e70074, 46c177b982, 1df50adc1c, b6cd9a4c4b, 2333ec4d1f,
85a8a54d3e, 7294a22901, f4b7474ade, 9428321607, 882544446a, 73160c9b90, 2184d6a3ff, 6e35895b44,
8009ff8537, 9bf792ce0b, f05aaeb329, 1bdf34e7dc, cd25bfca91, 1b621ab81c, cb2e5ac776, 8ce32eb3e1,
aae0314bda, 35d5248d41, 0ccc2555d3, b26a711e96, 2218a052b2, 40f419ca54, f742fc3dd1, 33fbcc56d6,
61d05dea58, 8a821060a3, 0d44599a63, 8e29b08070, b6c3e61603, dc08b75c6a, 5420fa942e, 1ee33d3a8d,
61dab8dc0b, 0022a2b29e, b2a27ed089, d8ae50800a, 43fa72b7b7, 36b62b7270, 73204c868d, 2ee889843a,
74b78e75a1, 6905edbeb6, 6c69da1573, e776dfd800, 95bf380d07, 2a61ad1b57, 80703010bd, e88c10670e,
2a2953c674, 1054f37765, f77257cf79, f004cc07df, 065a4da72d, 98c7f2eb13, d332502d3d, a7bf7867d7,
c63cda7c21, caab0cdf27, 1191876ae8, fa51a589ef, 3f274115b0, 3b0918981e, a327dfeed7, d8cef34d6c,
6fb6947feb, db8173da28, bcdfb3cfb0, 79aeb10431, 5fd2b151b9, 3c107ef4dc, a5f93d6013, 38338e848d,
e9518072a8, 10dbd0afbd, e22f938ae5, 1dce56e2f8, 1f0b2eac12, d9539e0f27, 0909368339, 091b634ea1,
d18804b0bb, a8b5b856d1, 1d2a18b355, 4a59340182, aa33613b98, cf042b2a4c, 65c86377fc, 96372c15e2,
f365b32c60, 5af2c42bde, c0400e9db5, f7447837c5, a4dbee3e38, fb7899aa06, 6d54d9f49a, 6546869c42,
aa79a02f9c, 447febcdd6, 61732847b6, fcd9d97f10, b6b5d52f78, 4b6f29d5e1, f5d5230034, 8dc19374cc,
a8f2af0503, d8a2941e9e, 55b6d0bbdd, a3c044b657, 4a2abc1a46, 410c78f2e5, 3b5830a1cf, ab7df10a7d,
93663e987c, 6114266b84, 97f96a6376, 58062be2a3, 5ec4efe88e, e02aae71a1, 1f9f885379, 80509673d2,
b902110d75, 53affb9bc0, 8e4e3998dd
.github/ISSUE_TEMPLATE.md (new file, vendored, 47 lines)

@@ -0,0 +1,47 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->

**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):

<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can. If you leave out
information, we can't help you as well.

If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.

In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Environment**:
- **Cloud provider or hardware configuration:**

- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**

- **Version of Ansible** (`ansible --version`):

**Kargo version (commit) (`git rev-parse --short HEAD`):**

**Network plugin used**:

**Copy of your inventory file:**

**Command used to invoke ansible**:

**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Anything else we need to know**:
<!-- By running scripts/collect-info.yaml you can get a lot of useful information.
The script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS, remember to add '-e ansible_python_interpreter=/opt/bin/python'.)
After running this command you can find the logs in `pwd`/logs.tar.gz. You can also upload the entire file somewhere and paste the link here.-->
.gitignore (vendored, 1 line added)

@@ -10,4 +10,5 @@ temp
*.pyo
*.tfstate
*.tfstate.backup
**/*.sw[pon]
/ssh-bastion.conf
.gitlab-ci.yml (161 lines changed)

@@ -1,4 +1,5 @@
stages:
- moderator
- unit-tests
- deploy-gce-part1
- deploy-gce-part2

@@ -17,7 +18,7 @@ variables:
# us-west1-a

before_script:
- pip install ansible
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0

@@ -48,7 +49,13 @@ before_script:
GS_SECRET_ACCESS_KEY: $GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES: "1"
BOOTSTRAP_OS: none
IDEMPOT_CHECK: "false"
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
MAGIC: "ci check this"

.gce: &gce
<<: *job

@@ -58,10 +65,9 @@ before_script:
paths:
- downloads/
- $HOME/.cache
stage: deploy-gce
before_script:
- docker info
- pip install ansible==2.1.3.0
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0

@@ -80,28 +86,37 @@ before_script:
- ls
- echo ${PWD}
- >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}

# Create cluster
- >
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=true
-e download_run_once=true
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
cluster.yml

@@ -115,6 +130,69 @@ before_script:
## Advanced DNS checks
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL

## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi

## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi

## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
reset.yml;
fi

## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi

## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi

after_script:
- >
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL

@@ -132,10 +210,11 @@ before_script:
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-west1-b
CLUSTER_MODE: separated
CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

.debian8_canal_ha_variables: &debian8_canal_ha_variables
# stage: deploy-gce-part1

@@ -168,24 +247,27 @@ before_script:
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: coreos-stable
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
IDEMPOT_CHECK: "true"

.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: rhel-7
CLOUD_REGION: us-east1-b
CLUSTER_MODE: separated
CLUSTER_MODE: separate

.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separated
CLUSTER_MODE: separate
IDEMPOT_CHECK: "false"

.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-gce-special

@@ -193,6 +275,7 @@ before_script:
CLOUD_IMAGE: centos-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
IDEMPOT_CHECK: "true"

.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special

@@ -202,7 +285,16 @@ before_script:
CLUSTER_MODE: ha
BOOTSTRAP_OS: coreos

# Builds for PRs only (auto) and triggers (auto)
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
ETCD_DEPLOYMENT: rkt
KUBELET_DEPLOYMENT: rkt

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-sep:
stage: deploy-gce-part1
<<: *job

@@ -405,12 +497,33 @@ coreos-alpha-weave-ha:
except: ['triggers']
only: ['master', /^pr-.*$/]

ubuntu-rkt-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_rkt_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

# Premoderated with manual actions
ci-authorized:
<<: *job
stage: moderator
before_script:
- apt-get -y install jq
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']

syntax-check:
<<: *job
stage: unit-tests
script:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
except: ['triggers']
except: ['triggers', 'master']

tox-inventory-builder:
stage: unit-tests

@@ -419,4 +532,4 @@ tox-inventory-builder:
- pip install tox
- cd contrib/inventory_builder && tox
when: manual
except: ['triggers']
except: ['triggers', 'master']
README.md (69 lines changed)

@@ -1,4 +1,4 @@
(logo image changed)

## Deploy a production ready kubernetes cluster

@@ -14,75 +14,88 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
To deploy the cluster you can use:

[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
**Ansible** usual commands <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>

* [Requirements](#requirements)
* [Kargo vs ...](docs/comparisons.md)
* [Getting started](docs/getting-started.md)
* [Ansible inventory and tags](docs/ansible.md)
* [Deployment data variables](docs/vars.md)
* [DNS stack](docs/dns-stack.md)
* [HA mode](docs/ha-mode.md)
* [Network plugins](#network-plugins)
* [Vagrant install](docs/vagrant.md)
* [CoreOS bootstrap](docs/coreos.md)
* [Ansible variables](docs/ansible.md)
* [Downloaded artifacts](docs/downloads.md)
* [Cloud providers](docs/cloud.md)
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
* [Network plugins](#network-plugins)
* [Large deployments](docs/large-deployments.md)
* [Upgrades basics](docs/upgrades.md)
* [Roadmap](docs/roadmap.md)

Supported Linux distributions
===============

* **CoreOS**
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10, 16.04
* **Fedora** 23
* **Container Linux by CoreOS**
* **Debian** Jessie
* **Ubuntu** 16.04
* **CentOS/RHEL** 7

Versions
--------------
Note: Upstart/SysV init based OS types are not supported.

[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.6 <br>
Versions of supported components
--------------------------------

[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.10.3 <br>
[docker](https://www.docker.com/) v1.12.5 <br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>

Note: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
plugins' related OS services. Also note, only one of the supported network
plugins can be deployed for a given single cluster.

Requirements
--------------

* The target servers must have **access to the Internet** in order to pull docker images.
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should disable your firewall
In order to avoid any issue during deployment you should disable your firewall.
* The target servers are configured to allow **IPv4 forwarding**.
* **Copy your ssh keys** to all the servers that are part of your inventory.
* **Ansible v2.x and python-netaddr**
* **Ansible v2.2 (or newer) and python-netaddr**

## Network plugins
You can choose between 3 network plugins. (default: `flannel` with vxlan backend)
You can choose between 4 network plugins. (default: `flannel` with vxlan backend)

* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.

* [**calico**](docs/calico.md): bgp (layer 3) networking.

* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))

The choice is defined with the variable `kube_network_plugin`
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).

The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).

## CI Tests

[](https://travis-ci.org/kubernetes-incubator/kargo) </br>

### Google Compute Engine
[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>

| Calico | Flannel | Weave |
------------- | ------------- | ------------- | ------------- |
Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|

CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
CI/end-to-end tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
See the [test matrix](docs/test_cases.md) for details.
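Since the README above only names the `kube_network_plugin` variable, here is a minimal sketch of overriding it at deploy time; the inventory path follows the repository layout and the plugin choice is purely illustrative:

```
# Deploy with calico instead of the default flannel (illustrative values):
ansible-playbook -i inventory/inventory.ini -b --become-user=root \
    -e kube_network_plugin=calico cluster.yml
```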
RELEASE.md (34 lines changed)

@@ -7,3 +7,37 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`

## Major/minor releases, merge freezes and milestones

* Kargo does not maintain stable branches for releases. Releases are tags, not
branches, and there are no backports. Therefore, there is no need for merge
freezes either.

* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
via maintenance releases (vX.Y.Z) and assigned to the corresponding open
milestone (vX.Y). That milestone remains open for the major/minor release's
support lifetime, which ends once the milestone is closed. Only then can the next major
or minor release be done.

* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer versions are not supported and not tested for the given release.

* There are no unstable releases and no APIs, thus Kargo doesn't follow
[semver](http://semver.org/). Every version describes only a stable release.
Breaking changes, if any are introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kargo scope and are up to the components' teams to deal with and
document.

* Minor releases can change components' versions, but not the major ``kube_version``.
A greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1,
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
foo
foo
foo
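A concrete sketch of release step 3 above; the version number is hypothetical and the `origin` remote name is an assumption (the document only says `git push $VERSION`):

```
VERSION=v2.1.0                 # hypothetical release version
git tag -s "$VERSION"          # signed tag; the changelog goes into the tag message
git push origin "$VERSION"     # assumes the remote is named 'origin'
```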
ansible.cfg (2 lines added)

@@ -7,3 +7,5 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library
cluster.yml

@@ -28,6 +28,7 @@
roles:
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: rkt, tags: rkt, when: "'rkt' in [ etcd_deployment_type, kubelet_deployment_type ]" }

- hosts: etcd:!k8s-cluster
any_errors_fatal: true

@@ -45,7 +46,6 @@
any_errors_fatal: true
roles:
- { role: kubernetes/master, tags: master }
- { role: kubernetes-apps/lib, tags: apps }
- { role: kubernetes-apps/network_plugin, tags: network }

- hosts: calico-rr

@@ -56,11 +56,10 @@
- hosts: k8s-cluster
any_errors_fatal: true
roles:
- { role: dnsmasq, tags: dnsmasq }
- { role: kubernetes/preinstall, tags: resolvconf }
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }

- hosts: kube-master[0]
any_errors_fatal: true
roles:
- { role: kubernetes-apps/lib, tags: apps }
- { role: kubernetes-apps, tags: apps }
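The dnsmasq and resolvconf plays above became conditional on `dns_mode` and `resolvconf_mode`; a hedged sketch of steering them from the command line, using values documented in docs/dns-stack.md further below:

```
# Skip the dnsmasq DaemonSet and let kubelet talk to kubedns directly:
ansible-playbook -i inventory/inventory.ini -b --become-user=root \
    -e dns_mode=kubedns -e resolvconf_mode=docker_dns cluster.yml
```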
contrib/inventory_builder/inventory.py

@@ -40,7 +40,8 @@ import os
import re
import sys

ROLES = ['kube-master', 'all', 'k8s-cluster:children', 'kube-node', 'etcd']
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
         'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,

@@ -51,10 +52,18 @@ def get_var_as_bool(name, default):
    value = os.environ.get(name, '')
    return _boolean_states.get(value.lower(), default)

# Configurable as shell vars start

CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")

# Configurable as shell vars end


class KargoInventory(object):

@@ -74,11 +83,16 @@ class KargoInventory(object):
        if changed_hosts:
            self.hosts = self.build_hostnames(changed_hosts)
            self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
            self.set_kube_master(list(self.hosts.keys())[:2])
            self.set_all(self.hosts)
            self.set_k8s_cluster()
            self.set_kube_node(self.hosts.keys())
            self.set_etcd(list(self.hosts.keys())[:3])
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_kube_master(list(self.hosts.keys())[3:5])
            else:
                self.set_kube_master(list(self.hosts.keys())[:2])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:3])
        else:  # Show help if no options
            self.show_help()
            sys.exit(0)

@@ -205,8 +219,32 @@ class KargoInventory(object):
        self.add_host_to_group('k8s-cluster:children', 'kube-node')
        self.add_host_to_group('k8s-cluster:children', 'kube-master')

    def set_calico_rr(self, hosts):
        for host in hosts:
            if host in self.config.items('kube-master'):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-master group".format(host))
                continue
            if host in self.config.items('kube-node'):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-node group".format(host))
                continue
            self.add_host_to_group('calico-rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.config['all']) >= SCALE_THRESHOLD:
                if self.config.has_option('etcd', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
                if self.config.has_option('kube-master', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in kube-master "
                               "group.".format(host))
                    continue
            self.add_host_to_group('kube-node', host)

    def set_etcd(self, hosts):

@@ -275,7 +313,15 @@ print_ips - Write a space-delimited list of IPs from "all" group
Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1'''
Delete a host by id: inventory.py -node1

Configurable env vars:
DEBUG                    Enable debug printing. Default: True
CONFIG_FILE              File to write config to. Default: ./inventory.cfg
HOST_PREFIX              Host prefix for generated hosts. Default: node
SCALE_THRESHOLD          Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD  Separate K8s master and ETCD if # of nodes >= 200
'''
        print(help_text)

    def print_config(self):
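A usage sketch assembled from the script's own help text above; all IP addresses are hypothetical:

```
cd contrib/inventory_builder
# Initial cluster of three hosts; env vars as documented in the help text:
CONFIG_FILE=./inventory.cfg HOST_PREFIX=node python inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
python inventory.py 10.10.1.6    # add another host after initial creation
python inventory.py -node1       # delete a host by id
python inventory.py print_cfg    # dump the resulting config
```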
requirements.yml (deleted, 48 lines)

@@ -1,48 +0,0 @@
---
- src: https://gitlab.com/kubespray-ansibl8s/k8s-common.git
  path: roles/apps
  scm: git

#- src: https://gitlab.com/kubespray-ansibl8s/k8s-dashboard.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
#  path: roles/apps
#  scm: git
contrib/inventory_builder/tests/test_inventory.py

@@ -210,3 +210,31 @@ class TestInventory(unittest.TestCase):

        self.inv.set_etcd([host])
        self.assertTrue(host in self.inv.config[group])

    def test_scale_scenario_one(self):
        num_nodes = 50
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(hosts.keys()[0:3])
        self.inv.set_kube_master(hosts.keys()[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])

    def test_scale_scenario_two(self):
        num_nodes = 500
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(hosts.keys()[0:3])
        self.inv.set_kube_master(hosts.keys()[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
tox.ini

@@ -11,7 +11,7 @@ deps =
-r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
commands = py.test -vv #{posargs:./tests}
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
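The `tox-inventory-builder` CI job above exercises exactly this tox environment; running it locally is the same two commands the job uses:

```
pip install tox
cd contrib/inventory_builder && tox
```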
(deleted symbolic link)

@@ -1 +0,0 @@
../../../../../roles/kubernetes-apps/lib
contrib/terraform/openstack/README.md

@@ -30,13 +30,13 @@ requirements.

#### OpenStack

Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:

```
$ source ~/.stackrc
```

You will need two networks before installing, an internal network and
You will need two networks before installing, an internal network and
an external (floating IP Pool) network. The internet network can be shared as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs the Terraform does not attempt to create

@@ -97,7 +97,7 @@ gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.

# Provision a Kubernetes Cluster on OpenStack

@@ -133,20 +133,20 @@ Make sure you can connect to the hosts:

```
$ ansible -i contrib/terraform/openstack/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
"changed": false,
"changed": false,
"ping": "pong"
}
example-etcd-1 | SUCCESS => {
"changed": false,
"changed": false,
"ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
"changed": false,
"changed": false,
"ping": "pong"
}
```

if you are deploying a system that needs bootstrapping, like CoreOS, these might have a state `FAILED` due to CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
if you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.

if it fails try to connect manually via SSH ... it could be something as simple as a stale host key.
contrib/terraform/openstack/group_vars/all.yml (165 lines removed; the file becomes a symbolic link, below)

@@ -1,165 +0,0 @@
# Valid bootstrap options (required): ubuntu, coreos, none
bootstrap_os: none

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
ndots: 5
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is used to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node for example. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1

# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: true

# Assume there are no internal loadbalancers for apiservers exist and listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Do not install additional dnsmasq
skip_dnsmasq: false
# Upstream dns servers used by dnsmasq
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

# There are some changes specific to the cloud providers
# for instance we need to encapsulate packets with some network plugins
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
# When openstack is used make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# When azure is used, you need to also set the following variables.
# cloud_provider:

# see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:

## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana
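The `kube_apiserver_ip`, `dns_server` and `skydns_server` values above are derived from `kube_service_addresses` via the `ipaddr` filter (hence the python-netaddr requirement). A hedged sketch of evaluating one of these expressions from the control machine; with the default `10.233.0.0/18` they resolve to 10.233.0.1, 10.233.0.2 and 10.233.0.3 respectively:

```
# Evaluate the skydns_server expression locally (needs ansible + python-netaddr):
ansible localhost -m debug \
    -a "msg={{ '10.233.0.0/18' | ipaddr('net') | ipaddr(3) | ipaddr('address') }}"
# => "msg": "10.233.0.3"
```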
contrib/terraform/openstack/group_vars/all.yml (symbolic link, 1 line)

@@ -0,0 +1 @@
../../../../inventory/group_vars/all.yml
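The link above makes the Terraform contrib inventory reuse the main group vars. Recreated by hand from the repository root, it would be:

```
ln -s ../../../../inventory/group_vars/all.yml \
    contrib/terraform/openstack/group_vars/all.yml
```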
docs/ansible.md

@@ -8,20 +8,39 @@ The inventory is composed of 3 groups:

* **kube-node** : list of kubernetes nodes where the pods will run.
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
Note: if you want the server to act both as master and node the server must be defined on both groups _kube-master_ and _kube-node_
* **etcd**: list of servers to compose the etcd cluster. You should have at least 3 servers for failover purposes.

Note: do not modify the children of _k8s-cluster_, like putting
the _etcd_ group into the _k8s-cluster_, unless you are certain
to do that and you have it fully contained in the latter:

```
k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
```

When _kube-node_ contains _etcd_, you make your etcd cluster schedulable for Kubernetes workloads as well.
If you want it standalone, make sure those groups do not intersect.
If you want the server to act both as master and node, the server must be defined
on both groups _kube-master_ and _kube-node_. If you want a standalone and
unschedulable master, the server must be defined only in the _kube-master_ and
not _kube-node_.

There are also two special groups:

* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
* **bastion** : configure a bastion host if your nodes are not directly reachable

Below is a complete inventory example (see the override sketch after this section for how to change its variables at run time):

```
## Configure 'ip' variable to bind kubernetes services on a
## different ip than the default iface
node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
node1 ansible_ssh_host=95.54.0.12 ip=10.3.0.1
node2 ansible_ssh_host=95.54.0.13 ip=10.3.0.2
node3 ansible_ssh_host=95.54.0.14 ip=10.3.0.3
node4 ansible_ssh_host=95.54.0.15 ip=10.3.0.4
node5 ansible_ssh_host=95.54.0.16 ip=10.3.0.5
node6 ansible_ssh_host=95.54.0.17 ip=10.3.0.6

[kube-master]
node1

@@ -42,12 +61,39 @@ node6

[k8s-cluster:children]
kube-node
kube-master
etcd
```

Group vars
--------------
The main variables to change are located in the directory ```inventory/group_vars/all.yml```.
Group vars and overriding variables precedence
----------------------------------------------

The group variables to control main deployment options are located in the directory ``inventory/group_vars``.

There are also role vars for docker, rkt, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use
the `-e ` runtime flags (most simple way) or other layers described in the docs.

Kargo uses only a few layers to override things (or expects them to
be overridden for roles):

Layer | Comment
------|--------
**role defaults** | provides best UX to override things for Kargo deployments
inventory vars | Unused
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
inventory host_vars | Unused
playbook group_vars | Unused
playbook host_vars | Unused
**host facts** | Kargo overrides for internal roles' logic, like state flags
play vars | Unused
play vars_prompt | Unused
play vars_files | Unused
registered vars | Unused
set_facts | Kargo overrides those, for some places
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``

Ansible tags
------------

@@ -132,5 +178,5 @@ bastion host.
bastion ansible_ssh_host=x.x.x.x
```

For more information about Ansible and bastion hosts, read
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
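To tie the precedence table above together: extra vars always win, so a one-off override file is the simplest lever. A minimal sketch; the file name and the chosen values are hypothetical:

```
cat > override.yml <<EOF
kube_network_plugin: calico
dns_mode: kubedns
EOF
ansible-playbook -i inventory/inventory.ini -b --become-user=root \
    -e @override.yml cluster.yml
```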
docs/comparisons.md (new file, 25 lines)

@@ -0,0 +1,25 @@
Kargo vs [Kops](https://github.com/kubernetes/kops)
---------------

Kargo runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration
itself, and as such is less flexible in deployment platforms. For people with
familiarity with Ansible, existing Ansible deployments or the desire to run a
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future.

Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------

Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it would've likely been named a "Kubernetes cluster operator". Kargo, however,
does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides.
@@ -9,47 +9,14 @@ to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||
Other nodes in the inventory, like external storage nodes or a separate etcd cluster
|
||||
node group, considered non-cluster and left up to the user to configure DNS resolve.
|
||||
|
||||
Note, custom ``ndots`` values affect only the dnsmasq daemon set (explained below).
|
||||
While the kubedns has the ``ndots=5`` hardcoded, which is not recommended due to
|
||||
[DNS performance reasons](https://github.com/kubernetes/kubernetes/issues/14051).
|
||||
You can use config maps for the kubedns app to workaround the issue, which is
|
||||
yet in the Kargo scope.
|
||||
|
||||
Additional search (sub)domains may be defined in the ``searchdomains``
|
||||
and ``ndots`` vars. And additional recursive DNS resolvers in the `` upstream_dns_servers``,
|
||||
``nameservers`` vars. Intranet/cloud provider DNS resolvers should be specified
|
||||
in the first place, followed by external resolvers, for example:
|
||||
DNS variables
|
||||
=============
|
||||
|
||||
```
|
||||
skip_dnsmasq: true
|
||||
nameservers: [8.8.8.8]
|
||||
upstream_dns_servers: [172.18.32.6]
|
||||
```
|
||||
or
|
||||
```
|
||||
skip_dnsmasq: false
|
||||
upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
|
||||
```
|
||||
The vars are explained below. For the early cluster deployment stage, when there
|
||||
is yet K8s cluster and apps exist, a user may expect local repos to be
|
||||
accessible via authoritative intranet resolvers. For that case, if none custom vars
|
||||
was specified, the default resolver is set to either the cloud provider default
|
||||
or `8.8.8.8`. And domain is set to the default ``dns_domain`` value as well.
|
||||
Later, the nameservers will be reconfigured to the DNS service IP that Kargo
|
||||
configures for K8s cluster.
|
||||
There are several global variables which can be used to modify DNS settings:
|
||||
|
||||
Also note, existing records will be purged from the `/etc/resolv.conf`,
|
||||
including resolvconf's base/head/cloud-init config files and those that come from dhclient.
|
||||
This is required for hostnet pods networking and for [kubelet to not exceed search domains
|
||||
limits](https://github.com/kubernetes/kubernetes/issues/9229).
|
||||
|
||||
Instead, new domain, search, nameserver records and options will be defined from the
|
||||
aforementioned vars:
|
||||
* Superseded via dhclient's DNS update hook.
|
||||
* Generated via cloud-init (CoreOS only).
|
||||
* Statically defined in the `/etc/resolv.conf`, if none of above is applicable.
|
||||
* Resolvconf's head/base files are disabled from populating anything into the
|
||||
`/etc/resolv.conf`.
|
||||
#### ndots
|
||||
ndots value to be used in ``/etc/resolv.conf``
|
||||
|
||||
It is important to note that multiple search domains combined with high ``ndots``
|
||||
values lead to poor performance of DNS stack, so please choose it wisely.
|
||||
@@ -58,48 +25,97 @@ replies for [bogus internal FQDNS](https://github.com/kubernetes/kubernetes/issu
|
||||
before it even hits the kubedns app. This enables dnsmasq to serve as a
|
||||
protective, but still recursive resolver in front of kubedns.
|
||||
|
||||
DNS configuration details
|
||||
-------------------------
|
||||
#### searchdomains
|
||||
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||
|
||||
Here is an approximate picture of how DNS things working and
|
||||
being configured by Kargo ansible playbooks:
|
||||
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
||||
to 256 characters. Depending on the length of ``dns_domain``, you're limitted to less then the total limit.
|
||||
|
||||

|
||||
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
|
||||
additional search domains. Please take this into the accounts for the limits.
|
||||
|
||||
Note that an additional dnsmasq daemon set is installed by Kargo
|
||||
by default. Kubelet will configure DNS base of all pods to use the
|
||||
given dnsmasq cluster IP, which is defined via the ``dns_server`` var.
|
||||
The dnsmasq forwards requests for a given cluster ``dns_domain`` to
|
||||
Kubedns's SkyDns service. The SkyDns server is configured to be an
|
||||
authoritative DNS server for the given cluser domain (and its subdomains
|
||||
up to ``ndots:5`` depth). Note: you should scale its replication controller
|
||||
up, if SkyDns chokes. These two layered DNS forwarders provide HA for the
|
||||
DNS cluster IP endpoint, which is a critical moving part for Kubernetes apps.
|
||||
#### nameservers
|
||||
This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts
|
||||
``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable
|
||||
is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified).
|
||||
|
||||
Nameservers are as well configured in the hosts' ``/etc/resolv.conf`` files,
|
||||
as the given DNS cluster IP merged with ``nameservers`` values. While the
|
||||
DNS cluster IP merged with the ``upstream_dns_servers`` defines additional
|
||||
nameservers for the aforementioned nsmasq daemon set running on all hosts.
|
||||
This mitigates existing Linux limitation of max 3 nameservers in the
|
||||
``/etc/resolv.conf`` and also brings an additional caching layer for the
|
||||
clustered DNS services.
|
||||
#### upstream_dns_servers
|
||||
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
||||
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
||||
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).

You can skip the dnsmasq daemon set install steps by setting
``skip_dnsmasq: true``. This may be the case if you're fine with
the nameservers limitation. Sadly, there is no way to work around the
search domain limitations of 256 chars and 6 domains. Thus, you can
use the ``searchdomains`` var to define no more than three custom domains.
The remaining three slots are reserved for K8s cluster default subdomains.

DNS modes supported by kargo
============================

You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
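
Both variables live in the cluster group vars; their defaults, as shown in the
``group_vars`` hunk later in this diff, are:

```
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
```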

## dns_mode

``dns_mode`` configures how kargo will set up cluster DNS. There are three modes available:

#### dnsmasq_kubedns (default)

This installs an additional dnsmasq DaemonSet, which gives more flexibility and lifts some
limitations (e.g. the number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.

#### kubedns

This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.

#### none

This installs neither dnsmasq nor kubedns/skydns. This basically disables cluster DNS completely and
leaves you with a non-functional cluster.

## resolvconf_mode

``resolvconf_mode`` configures how kargo will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:

#### docker_dns (default)

This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags.

The following nameservers are added to the docker daemon (in the same order as listed here):
* cluster nameserver (depends on dns_mode)
* content of the optional upstream_dns_servers variable
* host system nameservers (read from the host's /etc/resolv.conf)

The following search domains are added to the docker daemon (in the same order as listed here):
* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``)
* content of the optional searchdomains variable
* host system search domains (read from the host's /etc/resolv.conf)

The following dns options are added to the docker daemon:
* ndots:{{ ndots }}
* timeout:2
* attempts:2
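
Putting the three lists together: assuming a hypothetical ``dns_domain`` of
``cluster.local``, a dnsmasq cluster IP of ``10.233.0.2``, ``ndots: 2`` and no
optional variables or host entries, the generated systemd drop-in (see the
``docker-dns.conf.j2`` template later in this diff) would render roughly as:

```
[Service]
Environment="DOCKER_DNS_OPTIONS=--dns 10.233.0.2 \
  --dns-search default.svc.cluster.local --dns-search svc.cluster.local \
  --dns-opt ndots:2 --dns-opt timeout:2 --dns-opt attempts:2"
```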

For normal PODs, k8s will ignore these options and set up its own DNS settings for the PODs, taking
the --cluster_dns (either dnsmasq or kubedns, depending on dns_mode) kubelet option into account.
For ``hostNetwork: true`` PODs, however, k8s will let docker set up DNS settings. Docker containers which
are not started/managed by k8s will also use these docker options.

The host system nameservers are added to ensure name resolution also works while cluster DNS is not
running yet. This is especially important in early stages of cluster deployment. In this early stage,
DNS queries to the cluster DNS will time out after a few seconds, resulting in the system nameserver being
used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
servers, which in turn will forward queries to the system nameserver if required.

#### host_resolvconf

This activates the classic kargo behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).

As cluster DNS is not available at the early deployment stage, this mode is split into 2 stages. In the first
stage (``dns_early: true``), ``/etc/resolv.conf`` is configured to use the DNS servers found in ``upstream_dns_servers``
and ``nameservers``. Later, ``/etc/resolv.conf`` is reconfigured to use the cluster DNS server first, leaving
the other nameservers as backups.
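
Sketched end state for a hypothetical cluster (IPs and domain are illustrative):

```
# /etc/resolv.conf after the second stage
nameserver 10.233.0.2    # cluster DNS server first
nameserver 8.8.8.8       # upstream_dns_servers / nameservers as backup
search default.svc.cluster.local svc.cluster.local
```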

Also note that existing records will be purged from `/etc/resolv.conf`,
including resolvconf's base/head/cloud-init config files and those that come from dhclient.

#### none

Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases.
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
cluster service names.

When dnsmasq is skipped, Kargo redefines the DNS cluster IP to point directly
to the SkyDns cluster IP ``skydns_server`` and configures Kubelet's
``--dns_cluster`` to use that IP as well. While this greatly simplifies
things, it comes at the price of limited nameservers. As you know now,
the DNS cluster IP takes a slot in ``/etc/resolv.conf``, thus you can
specify no more than two nameservers for infra and/or external use.
Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
and will be merged together with the ``skydns_server`` IP into the hosts'
``/etc/resolv.conf``.

Limitations
-----------
@@ -1,10 +1,10 @@

Getting started
===============

The easiest way to run the deployment is to use the **kargo-cli** tool.
A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).

Here is a simple example on AWS:

* Create instances and generate the inventory

@@ -12,21 +12,45 @@ Here is a simple example on AWS:

kargo aws --instances 3
```

* Run the deployment

```
kargo deploy --aws -u centos -n calico
```

Building your own inventory
---------------------------

Ansible inventory can be stored in 3 formats: YAML, JSON, or INI file. There is
an example inventory located
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).

You can use an
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
to create or modify an Ansible inventory. Currently, it is limited in
functionality and is only used for making a basic Kargo cluster, but it does
support creating large clusters. It now supports
separating etcd and Kubernetes master roles from the node role if the size exceeds a
certain threshold. Run `inventory.py help` for more information.

Example inventory generator usage:

```
cp -r inventory my_inventory
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
```

Starting custom deployment
--------------------------

Once you have an inventory, you may want to customize deployment data vars
and start the deployment:

```
# Edit my_inventory/group_vars/*.yaml to override data vars
ansible-playbook -i my_inventory/inventory.cfg cluster.yaml -b -v \
  --private-key=~/.ssh/private_key
```

See more details in the [ansible guide](ansible.md).
@@ -33,15 +33,20 @@ proxy. Kargo includes support for an nginx-based proxy that resides on each

non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient. This option is
configured by the variable `loadbalancer_apiserver_localhost`. You may also
define the port the local internal loadbalancer uses by changing
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
It is also important to note that Kargo will only configure kubelet and kube-proxy
on non-master nodes to use the local internal loadbalancer.
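
A minimal sketch of the relevant group vars (the port override is hypothetical;
by default it follows `kube_apiserver_port`):

```
loadbalancer_apiserver_localhost: true
nginx_kube_apiserver_port: 8443
```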

If you choose to NOT use the local internal loadbalancer, you will need to configure
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
a user and is not covered by ansible roles in Kargo. By default, it only configures
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
node in the `kube-master` group. It can also configure clients to use endpoints
for a given loadbalancer type. The following diagram shows how traffic to the
apiserver is directed.

![Image](figures/loadbalancer_localhost.png?raw=true)
@@ -90,7 +95,7 @@ Access endpoints are evaluated automagically, as the following:

| Endpoint type                | kube-master   | non-master          |
|------------------------------|---------------|---------------------|
| Local LB                     | http://lc:p   | https://lc:nsp      |
| External LB, no internal     | https://lb:lp | https://lb:lp       |
| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |

@@ -99,7 +104,9 @@ Where:

* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `lc` - localhost;
* `p` - insecure port, `kube_apiserver_insecure_port`;
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
* `sp` - secure port, `kube_apiserver_port`;
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
* `ip` - the node IP, defers to the ansible IP;
* `aip` - `access_ip`, defers to the ip.
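
For the external LB case, the endpoint is described by inventory vars along
these lines (the FQDN, address and port are illustrative placeholders):

```
apiserver_loadbalancer_domain_name: "lb.example.com"
loadbalancer_apiserver:
  address: 10.1.0.100
  port: 8383
```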

@@ -27,5 +27,15 @@ For a large scaled deployments, consider the following configuration changes:

end up with the 'm' skipped for docker as well. This is required as docker does not
understand k8s units well.

* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
from host/network interruption much quicker with calico-rr. Note that the
calico-rr role must be on a host without the kube-master or kube-node role (but
the etcd role is okay).

* Check out the
[Inventory](https://github.com/kubernetes-incubator/kargo/blob/master/docs/getting-started.md#building-your-own-inventory)
section of the Getting started guide for tips on creating a large scale
Ansible inventory.

For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define ``retry_stagger: 60``, as in the
sketch below.
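
An illustrative invocation combining those knobs (inventory path and key reuse
the getting-started example; `retry_stagger` may equally be set in group vars):

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yaml -b \
  --forks=50 --timeout=600 -e retry_stagger=60 \
  --private-key=~/.ssh/private_key
```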

@@ -13,40 +13,41 @@ That would probably improve deployment speed and certs management [#553](https:/

- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)

### Provisioning and cloud providers
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
- [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
  (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
  https://github.com/kubernetes/kubernetes/issues/18112)

### Tests
- [x] Run kubernetes e2e tests
- [x] migrate to jenkins
  (a test is currently a deployment on a 3 node cluster, testing k8s api, ping between 2 pods)
- [x] Full tests on GCE per day (All OS's, all network plugins)
- [x] trigger a single test per pull request
- [ ] ~~single test with the Ansible version n-1 per day~~
- [x] Test idempotency on a single OS but for all network plugins/container engines
- [ ] single test on AWS per day
- [x] test different architectures :
  - 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
  - 5 instances, 3 are etcd and nodes, 2 are masters only
  - 7 instances, 3 etcd only, 2 masters, 2 nodes
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
### Lifecycle
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- [ ] Drain worker node when shutting down/deleting an instance
- [ ] Upgrade granularity: select components to upgrade and skip others

### Networking
- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
- [ ] Opencontrail
- [x] Canal
- [x] Cloud Provider native networking (instead of our network plugins)

### High availability
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
@@ -14,9 +14,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"

kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system

# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

@@ -58,7 +55,7 @@ kube_users:

    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "{{kube_api_pwd}}"
    role: admin

# Kubernetes cluster name, also will be used as DNS domain

@@ -130,27 +127,29 @@ peer_with_router: false

kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# local loadbalancer should use this port instead - defaults to kube_apiserver_port
nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns

## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4

dns_domain: "{{ cluster_name }}"

# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

@@ -200,3 +199,8 @@ k8s_image_pull_policy: IfNotPresent

# default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana

# Settings for containerized control plane (etcd/kubelet)
rkt_version: 1.21.0
etcd_deployment_type: docker
kubelet_deployment_type: docker
@@ -7,7 +7,7 @@

# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6

# ## configure a bastion host if your nodes are not directly reachable
# bastion ansible_ssh_host=x.x.x.x

# [kube-master]

@@ -1,2 +1,2 @@

ansible>=2.2.1
netaddr
@@ -4,7 +4,7 @@

{% for h in groups['all'] %}
{% if h != 'bastion' %}
{% if vars.update({'hosts': vars['hosts'] + ' ' + (hostvars[h].get('ansible_ssh_host') or hostvars[h]['ansible_host'])}) %}{% endif %}
{% endif %}
{% endfor %}

@@ -18,4 +18,4 @@ Host {{ bastion_ip }}

Host {{ vars['hosts'] }}
  ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
  StrictHostKeyChecking no
{% endif %}
@@ -11,6 +11,9 @@

#nameservers:
#  - 127.0.0.1

dns_forward_max: 150
cache_size: 1000

# Versions
dnsmasq_version: 2.72

@@ -18,9 +21,6 @@ dnsmasq_version: 2.72

dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"

# Skip dnsmasq setup
skip_dnsmasq: false

# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi

@@ -2,5 +2,5 @@

dependencies:
  - role: download
    file: "{{ downloads.dnsmasq }}"
    when: dns_mode == 'dnsmasq_kubedns' and download_localhost|default(false)
    tags: [download, dnsmasq]
@@ -15,17 +15,20 @@ local=/{{ bogus_domains }}

{% for srv in upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% elif resolvconf_mode == 'host_resolvconf' %}
{# The default resolver is only needed when the hosts resolv.conf was modified by us. If it was not modified, we can rely on dnsmasq to reuse the systems resolv.conf #}
server={{ default_resolver }}
no-resolv
{% endif %}

{% if kube_log_level == '4' %}
log-queries
{% endif %}
bogus-priv
no-negcache
cache-size={{ cache_size }}
dns-forward-max={{ dns_forward_max }}
max-cache-ttl=10
max-ttl=20
log-facility=-
@@ -20,8 +20,8 @@ spec:

        - dnsmasq
        args:
          - -k
          - -C
          - /etc/dnsmasq.d/01-kube-dns.conf
        securityContext:
          capabilities:
            add:
@@ -1,4 +1,4 @@

docker_version: '1.12'

docker_package_info:
  pkgs:

@@ -10,13 +10,12 @@

- name: Docker | reload systemd
  shell: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: Docker | reload docker.socket
  service:
    name: docker.socket
    state: restarted
  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']

- name: Docker | reload docker
  service:
@@ -14,13 +14,17 @@

    skip: true
  tags: facts

- include: set_facts_dns.yml
  when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
  tags: facts

- name: check for minimum kernel version
  fail:
    msg: >
      docker requires a minimum kernel version of
      {{ docker_kernel_min_version }} on
      {{ ansible_distribution }}-{{ ansible_distribution_version }}
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))
  tags: facts

- name: ensure docker repository public key is installed

@@ -34,7 +38,7 @@

  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ docker_repo_key_info.repo_keys }}"
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: ensure docker repository is enabled
  action: "{{ docker_repo_info.pkg_repo }}"

@@ -42,14 +46,13 @@

    repo: "{{item}}"
    state: present
  with_items: "{{ docker_repo_info.repos }}"
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_repo_info.repos|length > 0)

- name: Configure docker repository on RedHat/CentOS
  copy:
    src: "rh_docker.repo"
    dest: "/etc/yum.repos.d/docker.repo"
  when: ansible_distribution in ["CentOS","RedHat"]

- name: ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"

@@ -62,15 +65,18 @@

  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ docker_package_info.pkgs }}"
  notify: restart docker
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_package_info.pkgs|length > 0)

- name: Set docker upstart and sysvinit config
  include: non-systemd.yml
  when: ansible_service_mgr in ["sysvinit","upstart"]

- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
  raw: docker version -f "{{ '{{' }}.Client.Version{{ '}}' }}"
  register: docker_version
  failed_when: docker_version.stdout|version_compare('1.12', '<')
  changed_when: false
  when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'

- name: Set docker systemd config
  include: systemd.yml
  when: ansible_service_mgr == "systemd"

- name: ensure docker service is started and enabled
  service:
@@ -1,66 +0,0 @@

---
# This uses lineinfile instead of templates for idempotency in files that may be modified by different roles
- name: Set docker options config file path
  set_fact:
    docker_options_file: >-
      {%- if ansible_os_family == "Debian" -%}/etc/default/docker{%- elif ansible_os_family == "RedHat" -%}/etc/sysconfig/docker{%- endif -%}
  tags: facts

- name: Set docker options config variable name
  set_fact:
    docker_options_name: >-
      {%- if ansible_os_family == "Debian" -%}DOCKER_OPTS{%- elif ansible_os_family == "RedHat" -%}other_args{%- endif -%}
  tags: facts

- name: Set docker options config value to be written
  set_fact:
    docker_options_value: '"{{ docker_options }} $DOCKER_NETWORK_OPTIONS $DOCKER_STORAGE_OPTIONS $INSECURE_REGISTRY"'
  tags: facts

- name: Set docker options config line to be written
  set_fact:
    docker_options_line: "{{ docker_options_name }}={{ docker_options_value }}"
  tags: facts

- name: Set docker proxy lines to be written
  set_fact:
    docker_proxy_lines:
      - { name: "HTTP_PROXY", value: '"{{ http_proxy }}"' }
      - { name: "HTTPS_PROXY", value: '"{{ https_proxy }}"' }
      - { name: "NO_PROXY", value: '"{{ no_proxy }}"' }
  tags: facts

- name: Remove docker daemon proxy config lines that don't match desired lines
  lineinfile:
    dest: "{{ docker_options_file }}"
    regexp: "^{{ item.name }}=(?!{{ item.value|regex_escape() }})"
    state: absent
  with_items: "{{ docker_proxy_lines|default([]) }}"
  when: item.value is defined and (item.value | trim != '')

- name: Write docker daemon proxy config lines
  lineinfile:
    dest: "{{ docker_options_file }}"
    line: "{{ item.name }}={{ item.value }}"
    owner: root
    group: root
    mode: 0644
  with_items: "{{ docker_proxy_lines|default([]) }}"
  when: item.value is defined and (item.value | trim != '')

- name: Remove docker daemon options lines that don't match desired line
  lineinfile:
    dest: "{{ docker_options_file }}"
    regexp: "^(DOCKER_OPTS|OPTIONS|other_args)=(?!{{ docker_options_value|regex_escape() }})"
    state: absent

- name: Write docker daemon options line
  lineinfile:
    dest: "{{ docker_options_file }}"
    line: "{{ docker_options_line }}"
    owner: root
    group: root
    mode: 0644
  notify: restart docker

- meta: flush_handlers
roles/docker/tasks/set_facts_dns.yml (new file, 61 lines)
@@ -0,0 +1,61 @@

---
- name: set dns server for docker
  set_fact:
    docker_dns_servers: |-
      {%- if dns_mode == 'kubedns' -%}
        {{ [ skydns_server ] }}
      {%- elif dns_mode == 'dnsmasq_kubedns' -%}
        {{ [ dns_server ] }}
      {%- endif -%}

- name: set base docker dns facts
  set_fact:
    docker_dns_search_domains:
      - 'default.svc.{{ dns_domain }}'
      - 'svc.{{ dns_domain }}'
    docker_dns_options:
      - ndots:{{ ndots }}
      - timeout:2
      - attempts:2

- name: add upstream dns servers (only when dnsmasq is not used)
  set_fact:
    docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
  when: dns_mode == 'kubedns'

- name: add global searchdomains
  set_fact:
    docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"

- name: check system nameservers
  shell: grep "^nameserver" /etc/resolv.conf | sed 's/^nameserver\s*//'
  changed_when: False
  register: system_nameservers

- name: check system search domains
  shell: grep "^search" /etc/resolv.conf | sed 's/^search\s*//'
  changed_when: False
  register: system_search_domains

- name: add system nameservers to docker options
  set_fact:
    docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}"
  when: system_nameservers.stdout != ""

- name: add system search domains to docker options
  set_fact:
    docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}"
  when: system_search_domains.stdout != ""

- name: check number of nameservers
  fail: msg="Too many nameservers"
  when: docker_dns_servers|length > 3

- name: check number of search domains
  fail: msg="Too many search domains"
  when: docker_dns_search_domains|length > 6

- name: check length of search domains
  fail: msg="Search domains exceeded limit of 256 characters"
  when: docker_dns_search_domains|join(' ')|length > 256
@@ -13,7 +13,7 @@

    src: docker.service.j2
    dest: /etc/systemd/system/docker.service
  register: docker_service_file
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: Write docker options systemd drop-in
  template:

@@ -21,4 +21,11 @@

    dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
  notify: restart docker

- name: Write docker dns systemd drop-in
  template:
    src: docker-dns.conf.j2
    dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
  notify: restart docker
  when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'

- meta: flush_handlers
roles/docker/templates/docker-dns.conf.j2 (new file, 6 lines)
@@ -0,0 +1,6 @@

[Service]
Environment="DOCKER_DNS_OPTIONS=\
{% for d in docker_dns_servers %}--dns {{ d }} {% endfor %} \
{% for d in docker_dns_search_domains %}--dns-search {{ d }} {% endfor %} \
{% for o in docker_dns_options %}--dns-opt {{ o }} {% endfor %} \
"

@@ -22,6 +22,7 @@ ExecStart={{ docker_bin_dir }}/docker daemon \

          $DOCKER_OPTS \
          $DOCKER_STORAGE_OPTIONS \
          $DOCKER_NETWORK_OPTIONS \
          $DOCKER_DNS_OPTIONS \
          $INSECURE_REGISTRY
TasksMax=infinity
LimitNOFILE=1048576
@@ -1,16 +0,0 @@

docker_kernel_min_version: '2.6.32-431'

# versioning: docker-io itself is pinned at docker 1.5

docker_package_info:
  pkg_mgr: yum
  pkgs:
    - name: docker-io

docker_repo_key_info:
  pkg_key: ''
  repo_keys: []

docker_repo_info:
  pkg_repo: ''
  repos: []
@@ -1,12 +1,11 @@

docker_kernel_min_version: '3.10'

# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
docker_versioned_pkg:
  'latest': docker-engine
  '1.9': docker-engine=1.9.1-0~{{ ansible_distribution_release|lower }}
  '1.10': docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
  '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
  '1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
  '1.13': docker-engine=1.13.0-0~debian-{{ ansible_distribution_release|lower }}

docker_package_info:
  pkg_mgr: apt
@@ -2,8 +2,6 @@ docker_kernel_min_version: '0'

docker_versioned_pkg:
  'latest': docker
  '1.9': docker-1:1.9.1
  '1.10': docker-1:1.10.1
  '1.11': docker-1:1.11.2
  '1.12': docker-1:1.12.5
@@ -1,9 +1,17 @@

docker_kernel_min_version: '0'

# https://yum.dockerproject.org/repo/main/centos/7/Packages/
# or do 'yum --showduplicates list docker-engine'
docker_versioned_pkg:
  'latest': docker-engine
  '1.11': docker-engine-1.11.2-1.el7.centos
  '1.12': docker-engine-1.12.6-1.el7.centos
  '1.13': docker-engine-1.13.0-1.el7.centos

docker_package_info:
  pkg_mgr: yum
  pkgs:
    - name: "{{ docker_versioned_pkg[docker_version | string] }}"

docker_repo_key_info:
  pkg_key: ''
@@ -1,29 +0,0 @@

---
docker_version: '1.11'
docker_kernel_min_version: '3.2'

# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
docker_versioned_pkg:
  'latest': docker-engine
  '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
  '1.12': docker-engine=1.12.5-0~ubuntu-{{ ansible_distribution_release|lower }}

docker_package_info:
  pkg_mgr: apt
  pkgs:
    - name: "{{ docker_versioned_pkg[docker_version | string] }}"
      force: yes

docker_repo_key_info:
  pkg_key: apt_key
  keyserver: hkp://p80.pool.sks-keyservers.net:80
  repo_keys:
    - 58118E89F3A912897C070ADBF76221572C52609D

docker_repo_info:
  pkg_repo: apt_repository
  repos:
    - >
      deb https://apt.dockerproject.org/repo
      {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
      main
@@ -1,13 +1,13 @@

---
docker_version: '1.12'
docker_kernel_min_version: '3.2'

# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
docker_versioned_pkg:
  'latest': docker-engine
  '1.9': docker-engine=1.9.0-0~{{ ansible_distribution_release|lower }}
  '1.10': docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
  '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
  '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
  '1.13': docker-engine=1.13.0-0~ubuntu-{{ ansible_distribution_release|lower }}

docker_package_info:
  pkg_mgr: apt
@@ -2,14 +2,14 @@

local_release_dir: /tmp

# if this is set to true will only download files once. Doesn't work
# on Container Linux by CoreOS unless the download_localhost is true and localhost
# is running another OS type. Default compress level is 9 (best).
download_run_once: False
download_compress: 9

# if this is set to true, uses the localhost for download_run_once mode
# (requires docker and sudo to access docker). You may want this option for
# local caching of docker images or for Container Linux by CoreOS cluster nodes.
# Otherwise, uses the first node in the kube-master group to store images
# in the download_run_once mode.
download_localhost: False
@@ -21,18 +21,16 @@ download_always_pull: False

etcd_version: v3.0.6
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v1.0.0"
calico_cni_version: "v1.5.5"
weave_version: 1.8.2
flannel_version: v0.6.2
pod_infra_version: 3.0

# Download URL's
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"

# Checksums
weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"

# Containers
@@ -43,15 +41,13 @@ etcd_image_tag: "{{ etcd_version }}"

flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
calicoctl_image_repo: "calico/ctl"
calicoctl_image_tag: "{{ calico_version }}"
calico_node_image_repo: "calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_tag: "v0.5.1"
# TODO(adidenko): switch to "calico/routereflector" when
# https://github.com/projectcalico/calico-bird/pull/27 is merged
calico_rr_image_repo: "quay.io/l23network/routereflector"

@@ -68,6 +64,10 @@ netcheck_kubectl_tag: v0.18.0-120-gaeb4ac55ad12b1-dirty

netcheck_agent_img_repo: "quay.io/l23network/mcp-netchecker-agent"
netcheck_server_img_repo: "quay.io/l23network/mcp-netchecker-server"
netcheck_kubectl_img_repo: "gcr.io/google_containers/kubectl"
weave_kube_image_repo: "weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
weave_npc_image_repo: "weaveworks/weave-npc"
weave_npc_image_tag: "{{ weave_version }}"

nginx_image_repo: nginx
nginx_image_tag: 1.11.4-alpine
@@ -102,26 +102,17 @@ downloads:

    tag: "{{ netcheck_kubectl_tag }}"
    sha256: "{{ netcheck_kubectl_digest_checksum|default(None) }}"
    enabled: "{{ deploy_netchecker|bool }}"
  weave:
    dest: weave/bin/weave
    version: "{{weave_version}}"
    url: "{{weave_download_url}}"
    sha256: "{{ weave_checksum }}"
    owner: "root"
    mode: "0755"
    enabled: "{{ kube_network_plugin == 'weave' }}"
  etcd:
    version: "{{etcd_version}}"
    dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
    sha256: >-
      {%- if etcd_deployment_type in [ 'docker', 'rkt' ] -%}{{etcd_digest_checksum|default(None)}}{%- else -%}{{etcd_checksum}}{%- endif -%}
    url: "{{ etcd_download_url }}"
    unarchive: true
    owner: "etcd"
    mode: "0755"
    container: "{{ etcd_deployment_type in [ 'docker', 'rkt' ] }}"
    repo: "{{ etcd_image_repo }}"
    tag: "{{ etcd_image_tag }}"
  hyperkube:
@@ -165,6 +156,18 @@ downloads:

    tag: "{{ calico_rr_image_tag }}"
    sha256: "{{ calico_rr_digest_checksum|default(None) }}"
    enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr}} and kube_network_plugin == 'calico'"
  weave_kube:
    container: true
    repo: "{{ weave_kube_image_repo }}"
    tag: "{{ weave_kube_image_tag }}"
    sha256: "{{ weave_kube_digest_checksum|default(None) }}"
    enabled: "{{ kube_network_plugin == 'weave' }}"
  weave_npc:
    container: true
    repo: "{{ weave_npc_image_repo }}"
    tag: "{{ weave_npc_image_tag }}"
    sha256: "{{ weave_npc_digest_checksum|default(None) }}"
    enabled: "{{ kube_network_plugin == 'weave' }}"
  pod_infra:
    container: true
    repo: "{{ pod_infra_image_repo }}"
@@ -48,7 +48,7 @@

  when: "{{ download.enabled|bool and download.container|bool }}"
  tags: bootstrap-os

# This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes
- name: Hack python binary path for localhost
  raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python"
  when: "{{ download_delegate == 'localhost' }}"

@@ -119,7 +119,7 @@

  delegate_to: "{{ download_delegate }}"
  register: saved
  run_once: true
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool and (container_changed|bool or not img.stat.exists)

- name: Download | copy container images to ansible host
  synchronize:

@@ -128,7 +128,7 @@

    mode: pull
  delegate_to: localhost
  become: false
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname == groups['kube-master'][0] and download_delegate != "localhost" and download_run_once|bool and download.enabled|bool and download.container|bool and saved.changed

- name: Download | upload container images to nodes
  synchronize:

@@ -141,10 +141,10 @@

  until: get_task|success
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
  tags: [upload, upgrade]

- name: Download | load container images
  shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
  tags: [upload, upgrade]
@@ -8,7 +8,8 @@

    {%- if pull_by_digest|bool %}{{download.repo}}@sha256:{{download.sha256}}{%- else -%}{{download.repo}}:{{download.tag}}{%- endif -%}

- name: Register docker images info
  raw: >-
    {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}"
  register: docker_images_raw
  failed_when: false
  when: not download_always_pull|bool
@@ -71,14 +71,15 @@ fi

# ETCD member
if [ -n "$MASTERS" ]; then
  for host in $MASTERS; do
    cn="${host%%.*}"
    # Member key
    openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1
    openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1
    openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1

    # Admin key
    openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
    openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1
    openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
  done
fi

@@ -86,8 +87,9 @@ fi

# Node keys
if [ -n "$HOSTS" ]; then
  for host in $HOSTS; do
    cn="${host%%.*}"
    openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
    openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1
    openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
  done
fi
@@ -8,7 +8,6 @@

- name: etcd | reload systemd
  command: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: reload etcd
  service:

@@ -2,7 +2,7 @@

dependencies:
  - role: adduser
    user: "{{ addusers.etcd }}"
    when: not ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
  - role: download
    file: "{{ downloads.etcd }}"
    tags: download
@@ -16,14 +16,5 @@

    src: "etcd-{{ etcd_deployment_type }}.service.j2"
    dest: /etc/systemd/system/etcd.service
    backup: yes
  when: ansible_service_mgr == "systemd" and is_etcd_master
  notify: restart etcd

- name: Configure | Write etcd initd script
  template:
    src: "deb-etcd-{{ etcd_deployment_type }}.initd.j2"
    dest: /etc/init.d/etcd
    owner: root
    mode: 0755
  when: is_etcd_master
  notify: restart etcd
@@ -1,12 +1,4 @@

---

- name: Gen_certs | create etcd script dir
  file:
    path: "{{ etcd_script_dir }}"
    state: directory
    owner: root
  when: inventory_hostname == groups['etcd'][0]

- name: Gen_certs | create etcd cert dir
  file:
    path={{ etcd_cert_dir }}

@@ -15,6 +7,24 @@

    owner=root
    recurse=yes

- name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
  file:
    path: "{{ etcd_script_dir }}"
    state: directory
    owner: root
  run_once: yes
  delegate_to: "{{groups['etcd'][0]}}"

- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
  file:
    path={{ etcd_cert_dir }}
    group={{ etcd_cert_group }}
    state=directory
    owner=root
    recurse=yes
  run_once: yes
  delegate_to: "{{groups['etcd'][0]}}"

- name: Gen_certs | write openssl config
  template:
    src: "openssl.conf.j2"
@@ -40,7 +50,7 @@

      {{ m }}
      {% endif %}
      {% endfor %}"
    - HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
      {% if hostvars[h].sync_certs|default(false) %}
      {{ h }}
      {% endif %}

@@ -65,7 +75,7 @@

      'member-{{ inventory_hostname }}-key.pem'
      ]
    all_node_certs: "['ca.pem',
      {% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
      'node-{{ node }}.pem',
      'node-{{ node }}-key.pem',
      {% endfor %}]"

@@ -73,7 +83,9 @@

  tags: facts

- name: Gen_certs | Gather etcd master certs
  shell: "tar cfz - -C {{ etcd_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
  register: etcd_master_cert_data
  delegate_to: "{{groups['etcd'][0]}}"
  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
@@ -81,21 +93,28 @@

  notify: set etcd_secret_changed

- name: Gen_certs | Gather etcd node certs
  shell: "tar cfz - -C {{ etcd_cert_dir }} -T /dev/stdin <<< {{ my_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
  register: etcd_node_cert_data
  delegate_to: "{{groups['etcd'][0]}}"
  when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
        inventory_hostname in groups['k8s-cluster']) and
        sync_certs|default(false) and inventory_hostname not in groups['etcd']
  notify: set etcd_secret_changed

- name: Gen_certs | Copy certs on masters
  shell: "base64 -d <<< '{{etcd_master_cert_data.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
  args:
    executable: /bin/bash
  changed_when: false
  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
        inventory_hostname != groups['etcd'][0]

- name: Gen_certs | Copy certs on nodes
  shell: "base64 -d <<< '{{etcd_node_cert_data.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
  args:
    executable: /bin/bash
  changed_when: false
  when: sync_certs|default(false) and
        inventory_hostname not in groups['etcd']
@@ -121,7 +140,7 @@

    /usr/local/share/ca-certificates/etcd-ca.crt
    {%- elif ansible_os_family == "RedHat" -%}
    /etc/pki/ca-trust/source/anchors/etcd-ca.crt
    {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
    /etc/ssl/certs/etcd-ca.pem
    {%- endif %}
  tags: facts

@@ -133,9 +152,9 @@

    remote_src: true
  register: etcd_ca_cert

- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
  command: update-ca-certificates
  when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Container Linux by CoreOS"]

- name: Gen_certs | update ca-certificates (RedHat)
  command: update-ca-trust extract
@@ -1,17 +1,6 @@

---
- name: Install | Copy etcd binary from downloaddir
  command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
  when: etcd_deployment_type == "host"
  register: etcd_copy
  changed_when: false

- name: Install | Copy etcdctl binary from downloaddir
  command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
  when: etcd_deployment_type == "host"
  changed_when: false

#Plan A: no docker-py deps
- name: Install | Copy etcdctl binary from docker container
  command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy;
           {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
           {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:{{ etcd_container_bin_dir }}etcdctl {{ bin_dir }}/etcdctl &&

roles/etcd/tasks/install_host.yml (new file, 9 lines)
@@ -0,0 +1,9 @@

---
- name: Install | Copy etcd binary from downloaddir
  command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
  register: etcd_copy
  changed_when: false

- name: Install | Copy etcdctl binary from downloaddir
  command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
  changed_when: false
roles/etcd/tasks/install_rkt.yml (new file, 26 lines)
@@ -0,0 +1,26 @@

---
- name: Trust etcd container
  command: >-
    /usr/bin/rkt trust
    --skip-fingerprint-review
    --root
    https://quay.io/aci-signing-key
  register: etcd_rkt_trust_result
  until: etcd_rkt_trust_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false

- name: Install | Copy etcdctl binary from rkt container
  command: >-
    /usr/bin/rkt run
    --volume=bin-dir,kind=host,source={{ bin_dir}},readOnly=false
    --mount=volume=bin-dir,target=/host/bin
    {{ etcd_image_repo }}:{{ etcd_image_tag }}
    --name=etcdctl-binarycopy
    --exec=/bin/cp -- {{ etcd_container_bin_dir }}/etcdctl /host/bin/etcdctl
  register: etcd_task_result
  until: etcd_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
@@ -5,7 +5,7 @@
  tags: [etcd-secrets, facts]
- include: gen_certs.yml
  tags: etcd-secrets
- include: install.yml
- include: "install_{{ etcd_deployment_type }}.yml"
  when: is_etcd_master
  tags: upgrade
- include: set_cluster_health.yml
@@ -15,22 +15,22 @@
- include: refresh_config.yml
  when: is_etcd_master

- name: Ensure etcd is running
  service:
    name: etcd
    state: started
    enabled: yes
  when: is_etcd_master

- name: Restart etcd if binary or certs changed
  command: /bin/true
  notify: restart etcd
  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
        or etcd_secret_changed|default(false)

# Reload systemd before starting service
# reload-systemd
- meta: flush_handlers

- name: Ensure etcd is running
  service:
    name: etcd
    state: started
    enabled: yes
  when: is_etcd_master

# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`
# state instead of `new`.
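The `command: /bin/true` task above is the stock Ansible idiom for firing a handler from an arbitrary condition, and the `meta: flush_handlers` that follows forces pending handlers to run immediately rather than at the end of the play, so etcd is restarted before the subsequent "Ensure etcd is running" task. The notified handler itself is defined in the role's handlers file and is not part of this range; a minimal sketch of what it could look like, assuming a plain service restart:

- name: restart etcd
  # hypothetical handler body; the real handler lives outside this diff
  service:
    name: etcd
    state: restarted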
@@ -1,120 +0,0 @@
#!/bin/sh
set -a

### BEGIN INIT INFO
# Provides:          etcd
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: etcd distributed k/v store
# Description:
#   etcd is a distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ docker_bin_dir }}/docker
DAEMON_EXEC=`basename $DAEMON`
DAEMON_ARGS="run --restart=on-failure:5 --env-file=/etc/etcd.env \
--net=host \
-v /etc/ssl/certs:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}"

SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd.pid

# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    {{ docker_bin_dir }}/docker rm -f {{ etcd_member_name | default("etcd") }} &>/dev/null || true
    sleep 1
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
    RETVAL="$?"

    sleep 1
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if do_stop; then
      log_end_msg 0
    else
      log_failure_msg "Can't stop etcd"
      log_end_msg 1
    fi
    ;;
  status)
    if do_status; then
      log_end_msg 0
    else
      log_failure_msg "etcd is not running"
      log_end_msg 1
    fi
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    if do_stop; then
      if do_start; then
        log_end_msg 0
        exit 0
      else
        rc="$?"
      fi
    else
      rc="$?"
    fi
    log_failure_msg "Can't restart etcd"
    log_end_msg ${rc}
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@@ -1,109 +0,0 @@
#!/bin/sh
set -a

### BEGIN INIT INFO
# Provides:          etcd
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: etcd distributed k/v store
# Description:
#   etcd is a distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ bin_dir }}/etcd
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd.pid

# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -f /etc/etcd.env ] && . /etc/etcd.env

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
    RETVAL="$?"

    sleep 1
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if do_stop; then
      log_end_msg 0
    else
      log_failure_msg "Can't stop etcd"
      log_end_msg 1
    fi
    ;;
  status)
    if do_status; then
      log_end_msg 0
    else
      log_failure_msg "etcd is not running"
      log_end_msg 1
    fi
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    if do_stop; then
      if do_start; then
        log_end_msg 0
        exit 0
      else
        rc="$?"
      fi
    else
      rc="$?"
    fi
    log_failure_msg "Can't restart etcd"
    log_end_msg ${rc}
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
roles/etcd/templates/etcd-rkt.service.j2 (new file, 29 lines)
@@ -0,0 +1,29 @@
[Unit]
Description=etcd rkt wrapper
Documentation=https://github.com/coreos/etcd
Wants=network.target

[Service]
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000

ExecStart=/usr/bin/rkt run \
  --uuid-file-save=/var/run/etcd.uuid \
  --volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
  --mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
  --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
  --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
  --volume=var-lib-etcd,kind=host,source=/var/lib/etcd,readOnly=false \
  --mount=volume=var-lib-etcd,target=/var/lib/etcd \
  --set-env-file=/etc/etcd.env \
  --stage1-from-dir=stage1-fly.aci \
  {{ etcd_image_repo }}:{{ etcd_image_tag }} \
  --name={{ etcd_member_name | default("etcd") }}

ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd.uuid
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd.uuid

[Install]
WantedBy=multi-user.target
@@ -17,8 +17,6 @@ kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_tag: latest

# Limits for calico apps
calico_policy_controller_cpu_limit: 100m
@@ -31,9 +29,9 @@ deploy_netchecker: false
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
agent_img: "quay.io/l23network/mcp-netchecker-agent:v0.1"
server_img: "quay.io/l23network/mcp-netchecker-server:v0.1"
kubectl_image: "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty"
agent_img: "{{ netcheck_agent_img_repo }}:{{ netcheck_tag }}"
server_img: "{{ netcheck_server_img_repo }}:{{ netcheck_tag }}"
kubectl_image: "{{ netcheck_kubectl_img_repo }}:{{ netcheck_kubectl_tag }}"

# Limits for netchecker apps
netchecker_agent_cpu_limit: 30m
@@ -51,3 +49,5 @@ netchecker_kubectl_memory_requests: 64M

# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
calico_cert_dir: "/etc/calico/certs"
canal_cert_dir: "/etc/canal/certs"
@@ -1,8 +1,13 @@
---
- set_fact:
    calico_cert_dir: "{{ canal_cert_dir }}"
  when: kube_network_plugin == 'canal'
  tags: facts

- name: Write calico-policy-controller yaml
  template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml
  when: inventory_hostname == groups['kube-master'][0]

- name: Start of Calico policy controller
  kube:
    name: "calico-policy-controller"
@@ -5,6 +5,7 @@
  until: result.status == 200
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Lay Down KubeDNS Template
  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
@@ -12,7 +13,7 @@
    - {file: kubedns-rc.yml, type: rc}
    - {file: kubedns-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]
  when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
  tags: dnsmasq

- name: Kubernetes Apps | Start Resources
@@ -24,7 +25,7 @@
    filename: "{{kube_config_dir}}/{{item.item.file}}"
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]
  when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
  tags: dnsmasq

- include: tasks/calico-policy-controller.yml
@@ -36,11 +36,11 @@ spec:
        - name: ETCD_ENDPOINTS
          value: "{{ etcd_access_endpoint }}"
        - name: ETCD_CA_CERT_FILE
          value: "{{ etcd_cert_dir }}/ca.pem"
          value: "{{ calico_cert_dir }}/ca_cert.crt"
        - name: ETCD_CERT_FILE
          value: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
          value: "{{ calico_cert_dir }}/cert.crt"
        - name: ETCD_KEY_FILE
          value: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
          value: "{{ calico_cert_dir }}/key.pem"
        # Location of the Kubernetes API - this shouldn't need to be
        # changed so long as it is used in conjunction with
        # CONFIGURE_ETC_HOSTS="true".
@@ -53,10 +53,10 @@ spec:
        - name: CONFIGURE_ETC_HOSTS
          value: "true"
        volumeMounts:
        - mountPath: {{ etcd_cert_dir }}
        - mountPath: {{ calico_cert_dir }}
          name: etcd-certs
          readOnly: true
      volumes:
      - hostPath:
          path: {{ etcd_cert_dir }}
          path: {{ calico_cert_dir }}
        name: etcd-certs
@@ -1,305 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
  - Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
  name:
    required: false
    default: null
    description:
      - The name associated with resource
  filename:
    required: false
    default: null
    description:
      - The path and filename of the resource(s) definition file.
  kubectl:
    required: false
    default: null
    description:
      - The path to the kubectl bin
  namespace:
    required: false
    default: null
    description:
      - The namespace associated with the resource(s)
  resource:
    required: false
    default: null
    description:
      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
  label:
    required: false
    default: null
    description:
      - The labels used to filter specific resources.
  server:
    required: false
    default: null
    description:
      - The url for the API server that commands are executed against.
  force:
    required: false
    default: false
    description:
      - A flag to indicate to force delete, replace, or stop.
  all:
    required: false
    default: false
    description:
      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
  log_level:
    required: false
    default: 0
    description:
      - Indicates the level of verbosity of logging by kubectl.
  state:
    required: false
    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
    default: present
    description:
      - present handles checking existence or creating if definition file provided,
        absent handles deleting resource(s) based on other options,
        latest handles creating or updating based on existence,
        reloaded handles updating resource(s) definition using definition file,
        stopped handles stopping resource(s) based on other options.
requirements:
  - kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""

EXAMPLES = """
- name: test nginx is present
  kube: name=nginx resource=rc state=present

- name: test nginx is stopped
  kube: name=nginx resource=rc state=stopped

- name: test nginx is absent
  kube: name=nginx resource=rc state=absent

- name: test nginx is present
  kube: filename=/tmp/nginx.yml
"""


class KubeManager(object):

    def __init__(self, module):

        self.module = module

        self.kubectl = module.params.get('kubectl')
        if self.kubectl is None:
            self.kubectl = module.get_bin_path('kubectl', True)
        self.base_cmd = [self.kubectl]

        if module.params.get('server'):
            self.base_cmd.append('--server=' + module.params.get('server'))

        if module.params.get('log_level'):
            self.base_cmd.append('--v=' + str(module.params.get('log_level')))

        if module.params.get('namespace'):
            self.base_cmd.append('--namespace=' + module.params.get('namespace'))

        self.all = module.params.get('all')
        self.force = module.params.get('force')
        self.name = module.params.get('name')
        self.filename = module.params.get('filename')
        self.resource = module.params.get('resource')
        self.label = module.params.get('label')

    def _execute(self, cmd):
        args = self.base_cmd + cmd
        try:
            rc, out, err = self.module.run_command(args)
            if rc != 0:
                self.module.fail_json(
                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
        except Exception as exc:
            self.module.fail_json(
                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
        return out.splitlines()

    def _execute_nofail(self, cmd):
        args = self.base_cmd + cmd
        rc, out, err = self.module.run_command(args)
        if rc != 0:
            return None
        return out.splitlines()

    def create(self, check=True):
        if check and self.exists():
            return []

        cmd = ['create']

        if not self.filename:
            self.module.fail_json(msg='filename required to create')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def replace(self):

        if not self.force and not self.exists():
            return []

        cmd = ['replace']

        if self.force:
            cmd.append('--force')

        if not self.filename:
            self.module.fail_json(msg='filename required to reload')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def delete(self):

        if not self.force and not self.exists():
            return []

        cmd = ['delete']

        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to delete without filename')

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')

        return self._execute(cmd)

    def exists(self):
        cmd = ['get']

        if not self.resource:
            return False

        cmd.append(self.resource)

        if self.name:
            cmd.append(self.name)

        cmd.append('--no-headers')

        if self.label:
            cmd.append('--selector=' + self.label)

        if self.all:
            cmd.append('--all-namespaces')

        result = self._execute_nofail(cmd)
        if not result:
            return False
        return True

    def stop(self):

        if not self.force and not self.exists():
            return []

        cmd = ['stop']

        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to stop without filename')

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')

        return self._execute(cmd)


def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(),
            filename=dict(),
            namespace=dict(),
            resource=dict(),
            label=dict(),
            server=dict(),
            kubectl=dict(),
            force=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
        )
    )

    changed = False

    manager = KubeManager(module)
    state = module.params.get('state')

    if state == 'present':
        result = manager.create()

    elif state == 'absent':
        result = manager.delete()

    elif state == 'reloaded':
        result = manager.replace()

    elif state == 'stopped':
        result = manager.stop()

    elif state == 'latest':
        if manager.exists():
            manager.force = True
            result = manager.replace()
        else:
            result = manager.create(check=False)

    else:
        module.fail_json(msg='Unrecognized state %s.' % state)

    if result:
        changed = True
    module.exit_json(changed=changed,
                     msg='success: %s' % (' '.join(result))
                     )


from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
@@ -1,3 +1,20 @@
dependencies:
  - role: download
    file: "{{ downloads.calico_policy }}"
    when: ( enable_network_policy is defined and enable_network_policy == True ) or
          ( kube_network_plugin == 'canal' )
    tags: [download, network, canal]
  - role: download
    file: "{{ downloads.netcheck_server }}"
    when: deploy_netchecker
    tags: [download, netchecker]
  - role: download
    file: "{{ downloads.netcheck_agent }}"
    when: deploy_netchecker
    tags: [download, netchecker]
  - role: download
    file: "{{ downloads.netcheck_kubectl }}"
    when: deploy_netchecker
    tags: [download, netchecker]
  - {role: kubernetes-apps/ansible, tags: apps}
  - {role: kubernetes-apps/kpm, tags: [apps, kpm]}
@@ -3,3 +3,6 @@ dependencies:
  - role: kubernetes-apps/network_plugin/canal
    when: kube_network_plugin == 'canal'
    tags: canal
  - role: kubernetes-apps/network_plugin/weave
    when: kube_network_plugin == 'weave'
    tags: weave
roles/kubernetes-apps/network_plugin/weave/tasks/main.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
- name: Weave | Start Resources
  run_once: true
  kube:
    name: "weave-net"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/weave-net.yml"
    resource: "ds"
    namespace: "{{system_namespace}}"
    state: "{{ item | ternary('latest','present') }}"
  with_items: "{{ weave_manifest.changed }}"
  delegate_to: "{{groups['kube-master'][0]}}"


- name: "Weave | wait for weave to become available"
  uri:
    url: http://127.0.0.1:6784/status
    return_content: yes
  run_once: true
  register: weave_status
  retries: 10
  delay: "{{ retry_stagger | random + 3 }}"
  until: "{{ weave_status.status == 200 and
             'Status: ready' in weave_status.content }}"
  delegate_to: "{{groups['kube-master'][0]}}"
@@ -15,7 +15,6 @@

- name: Master | reload systemd
  command: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: Master | reload kubelet
  service:
@@ -11,21 +11,19 @@
  changed_when: false
  tags: [hyperkube, kubectl, upgrade]

- name: Gather kubectl bash completion
  command: "{{ bin_dir }}/kubectl completion bash"
  no_log: true
  register: kubectl_bash_completion
- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  #no_log: true
  when: ansible_os_family in ["Debian","RedHat"]
  tags: kubectl

- name: Write kubectl bash completion
  copy:
    content: "{{ kubectl_bash_completion.stdout }}"
    dest: /etc/bash_completion.d/kubectl.sh
- name: Set kubectl bash completion file
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: 0755
  when: ansible_os_family in ["Debian","RedHat"] and kubectl_bash_completion.changed
  when: ansible_os_family in ["Debian","RedHat"]
  tags: [kubectl, upgrade]

- name: Write kube-apiserver manifest
@@ -27,3 +27,5 @@ nginx_cpu_requests: 50m

nginx_image_repo: nginx
nginx_image_tag: 1.11.4-alpine

etcd_config_dir: /etc/ssl/etcd
@@ -7,7 +7,6 @@

- name: Kubelet | reload systemd
  command: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: Kubelet | reload kubelet
  service:
@@ -1,19 +1,31 @@
---
- name: Trust kubelet container
  command: >-
    /usr/bin/rkt trust
    --skip-fingerprint-review
    --root
    {{ item }}
  register: kubelet_rkt_trust_result
  until: kubelet_rkt_trust_result.rc == 0
  with_items:
    - "https://quay.io/aci-signing-key"
    - "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when: kubelet_deployment_type == "rkt"

- name: create kubelet working directory
  file:
    state: directory
    path: /var/lib/kubelet
  when: kubelet_deployment_type == "rkt"

- name: install | Write kubelet systemd init file
  template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
  when: ansible_service_mgr == "systemd"
  notify: restart kubelet

- name: install | Write kubelet initd script
  template: src=deb-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=0755 backup=yes
  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
  notify: restart kubelet

- name: install | Write kubelet initd script
  template: src=rh-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=0755 backup=yes
  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "RedHat"
  template: "src=kubelet.{{ kubelet_deployment_type }}.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes"
  notify: restart kubelet

- name: install | Install kubelet launch script
  template: src=kubelet-container.j2 dest="{{ bin_dir }}/kubelet" owner=kube mode=0755 backup=yes
  notify: restart kubelet
  when: kubelet_deployment_type == "docker"
@@ -1,121 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kubelet
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes node container manager
# Description:
#   The Kubernetes container manager maintains docker state against a state file.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes container manager"
NAME=kubelet
DAEMON={{ bin_dir }}/kubelet
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r {{kube_config_dir}}/$NAME.env ] && . {{kube_config_dir}}/$NAME.env

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    {{ docker_bin_dir }}/docker rm -f kubelet &>/dev/null || true
    sleep 1
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2)   exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@@ -1,16 +1,23 @@
#!/bin/bash
{{ docker_bin_dir }}/docker run --privileged \
  --net=host --pid=host --name=kubelet --restart=on-failure:5 \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v {{kube_config_dir}}:{{kube_config_dir}} \
  -v /sys:/sys \
  -v /dev:/dev \
  -v {{ docker_daemon_graph }}:/var/lib/docker \
  -v /var/run:/var/run \
  -v /var/lib/kubelet:/var/lib/kubelet \
  --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  nsenter --target=1 --mount --wd=. -- \
  ./hyperkube kubelet \
  $@
{{ docker_bin_dir }}/docker run \
  --net=host \
  --pid=host \
  --privileged \
  --name=kubelet \
  --restart=on-failure:5 \
  --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
  --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v /etc/ssl:/etc/ssl:ro \
  {% for dir in ssl_ca_dirs -%}
  -v {{ dir }}:{{ dir }}:ro \
  {% endfor -%}
  -v /sys:/sys:ro \
  -v {{ docker_daemon_graph }}:/var/lib/docker:rw \
  -v /var/lib/kubelet:/var/lib/kubelet:shared \
  -v /var/run:/var/run:rw \
  -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  ./hyperkube kubelet \
  $@
@@ -1,10 +1,5 @@
{% if ansible_service_mgr in ["sysvinit","upstart"] %}
# Logging directory
KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
{% else %}
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
@@ -17,9 +12,9 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
{% set kubelet_args_base %}--pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}{% endset %}

{# DNS settings for kubelet #}
{% if dns_setup|bool and skip_dnsmasq|bool %}
{% if dns_mode == 'kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster_dns={{ skydns_server }}{% endset %}
{% elif dns_setup|bool %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster_dns={{ dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
@@ -51,8 +46,3 @@ KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
{% if ansible_service_mgr in ["sysvinit","upstart"] %}
DAEMON_ARGS="$KUBE_LOGGING $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBELET_API_SERVER $KUBELET_ADDRESS \
$KUBELET_HOSTNAME $KUBELET_ARGS $DOCKER_SOCKET $KUBELET_ARGS $KUBELET_NETWORK_PLUGIN \
$KUBELET_CLOUDPROVIDER"
{% endif %}
roles/kubernetes/node/templates/kubelet.rkt.service.j2 (new file, 64 lines)
@@ -0,0 +1,64 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=calico-node.service
Wants=network.target calico-node.service
{% else %}
Wants=network.target
{% endif %}

[Service]
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000

ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
ExecStartPre=-/bin/mkdir -p /var/lib/kubelet

EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
  --volume dns,kind=host,source=/etc/resolv.conf \
  --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
  --volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
  --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
  --volume etcd-ssl,kind=host,source={{ etcd_config_dir }},readOnly=true \
  --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
  --volume run,kind=host,source=/run,readOnly=false \
  --volume usr-share-certs,kind=host,source=/usr/share/ca-certificates,readOnly=true \
  --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
  --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false \
  --volume var-log,kind=host,source=/var/log \
  --mount volume=dns,target=/etc/resolv.conf \
  --mount volume=etc-cni,target=/etc/cni \
  --mount volume=etc-kubernetes,target={{ kube_config_dir }} \
  --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
  --mount volume=etcd-ssl,target={{ etcd_config_dir }} \
  --mount volume=opt-cni,target=/opt/cni \
  --mount volume=run,target=/run \
  --mount volume=usr-share-certs,target=/usr/share/ca-certificates \
  --mount volume=var-lib-docker,target=/var/lib/docker \
  --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
  --mount volume=var-log,target=/var/log \
  --stage1-from-dir=stage1-fly.aci \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
  --uuid-file-save=/var/run/kubelet.uuid \
  --debug --exec=/kubelet -- \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBELET_API_SERVER \
  $KUBELET_ADDRESS \
  $KUBELET_PORT \
  $KUBELET_HOSTNAME \
  $KUBE_ALLOW_PRIV \
  $KUBELET_ARGS \
  $DOCKER_SOCKET \
  $KUBELET_REGISTER_NODE \
  $KUBELET_NETWORK_PLUGIN

ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet.uuid

[Install]
WantedBy=multi-user.target
@@ -16,7 +16,7 @@ stream {
    }

    server {
        listen {{ kube_apiserver_port }};
        listen 127.0.0.1:{{ nginx_kube_apiserver_port }};
        proxy_pass kube_apiserver;
        proxy_timeout 10m;
        proxy_connect_timeout 1s;
@@ -8,8 +8,8 @@ clusters:
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/node.pem
    client-key: {{ kube_cert_dir }}/node-key.pem
    client-certificate: {{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
    client-key: {{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
contexts:
- context:
    cluster: local
@@ -1,129 +0,0 @@
#!/bin/bash
#
# /etc/rc.d/init.d/kubelet
#
# chkconfig: 2345 95 95
# description: Daemon for kubelet (kubernetes.io)

### BEGIN INIT INFO
# Provides:       kubelet
# Required-Start: $local_fs $network $syslog cgconfig
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:  2 3 4 5
# Default-Stop:   0 1 6
# Short-Description: start and stop kubelet
# Description:
#   The Kubernetes container manager maintains docker state against a state file.
### END INIT INFO

# Source function library.
. /etc/rc.d/init.d/functions

prog="kubelet"
exec="{{ bin_dir }}/$prog"
pidfile="/var/run/$prog.pid"
lockfile="/var/lock/subsys/$prog"
logfile="/var/log/$prog"

[ -e {{kube_config_dir}}/$prog.env ] && . {{kube_config_dir}}/$prog.env

start() {
    if [ ! -x $exec ]; then
      if [ ! -e $exec ]; then
        echo "Docker executable $exec not found"
      else
        echo "You do not have permission to execute the Docker executable $exec"
      fi
      exit 5
    fi

    check_for_cleanup

    if ! [ -f $pidfile ]; then
      printf "Starting $prog:\t"
      echo "\n$(date)\n" >> $logfile
      $exec $DAEMON_ARGS &>> $logfile &
      pid=$!
      echo $pid >> $pidfile
      touch $lockfile
      success
      echo
    else
      failure
      echo
      printf "$pidfile still exists...\n"
      exit 7
    fi
}

stop() {
    echo -n $"Stopping $prog: "
    killproc -p $pidfile -d 300 $prog
    retval=$?
    echo
    [ $retval -eq 0 ] && rm -f $lockfile
    return $retval
}

restart() {
    stop
    start
}

reload() {
    restart
}

force_reload() {
    restart
}

rh_status() {
    status -p $pidfile $prog
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}


check_for_cleanup() {
    if [ -f ${pidfile} ]; then
      /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}
    fi
}

case "$1" in
  start)
    rh_status_q && exit 0
    $1
    ;;
  stop)
    rh_status_q || exit 0
    $1
    ;;
  restart)
    $1
    ;;
  reload)
    rh_status_q || exit 7
    $1
    ;;
  force-reload)
    force_reload
    ;;
  status)
    rh_status
    ;;
  condrestart|try-restart)
    rh_status_q || exit 0
    restart
    ;;
  *)
    echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
    exit 2
esac

exit $?
@@ -24,11 +24,11 @@ openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"

# All clients access each node individually, instead of using a load balancer.
etcd_multiaccess: true

# CoreOS cloud init config file to define /etc/resolv.conf content
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
@@ -3,7 +3,7 @@
  notify:
    - Preinstall | reload network
    - Preinstall | reload kubelet
  when: ansible_os_family != "CoreOS"
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

# FIXME(bogdando) https://github.com/projectcalico/felix/issues/1185
- name: Preinstall | reload network
@@ -15,18 +15,18 @@
      networking
      {%- endif %}
    state: restarted
  when: ansible_os_family != "CoreOS" and kube_network_plugin not in ['canal', 'calico']
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and kube_network_plugin not in ['canal', 'calico']

- name: Preinstall | update resolvconf for CoreOS
- name: Preinstall | update resolvconf for Container Linux by CoreOS
  command: /bin/true
  notify:
    - Preinstall | apply resolvconf cloud-init
    - Preinstall | reload kubelet
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: Preinstall | apply resolvconf cloud-init
  command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: Preinstall | reload kubelet
  service:
roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
---

# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x
# or when changing resolvconf_mode)

- name: Remove kargo specific config from dhclient config
  blockinfile:
    dest: "{{dhclientconffile}}"
    state: absent
    backup: yes
    follow: yes
    marker: "# Ansible entries {mark}"
  when: dhclientconffile is defined
  notify: Preinstall | restart network

- name: Remove kargo specific dhclient hook
  file: path="{{ dhclienthookfile }}" state=absent
  when: dhclienthookfile is defined
  notify: Preinstall | restart network

# We need to make sure the network is restarted early enough so that docker can later pick up the correct system
# nameservers and search domains
- meta: flush_handlers
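With `state: absent`, `blockinfile` deletes whatever sits between the two marker comments (`{mark}` expands to BEGIN and END), so the undo only works when the marker matches the one used at write time. A minimal sketch of the writing counterpart, assuming the same marker; the `supersede` line is illustrative only and not taken from this diff:

- name: Add kargo specific config to dhclient config (illustrative counterpart)
  blockinfile:
    dest: "{{ dhclientconffile }}"
    state: present
    create: yes
    marker: "# Ansible entries {mark}"
    block: |
      supersede domain-name-servers 10.233.0.3;  # hypothetical nameserver value
  when: dhclientconffile is defined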
@@ -13,6 +13,7 @@
    follow: yes
    marker: "# Ansible entries {mark}"
  notify: Preinstall | restart network
  when: dhclientconffile is defined

- name: Configure dhclient hooks for resolv.conf (non-RH)
  template:
@@ -3,7 +3,7 @@
  blockinfile:
    dest: /etc/hosts
    block: |-
      {% for item in groups['all'] -%}{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4.address)) }}{% if (item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
      {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}{% if (item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
      {% endfor %}
    state: present
    create: yes
@@ -1,8 +1,11 @@
---
- name: Force binaries directory for CoreOS
- include: pre-upgrade.yml
  tags: [upgrade, bootstrap-os]

- name: Force binaries directory for Container Linux by CoreOS
  set_fact:
    bin_dir: "/opt/bin"
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: facts

- name: check bin dir exists
@@ -59,14 +62,6 @@
  when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
  tags: [kubelet, bootstrap-os, master, node]

- name: Create kubernetes logs directory
  file:
    path: "{{ kube_log_dir }}"
    state: directory
    owner: kube
  when: ansible_service_mgr in ["sysvinit","upstart"] and "{{ inventory_hostname in groups['k8s-cluster'] }}"
  tags: [bootstrap-os, master, node]

- name: check cloud_provider value
  fail:
    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure' or 'openstack'"
@@ -81,17 +76,6 @@
  when: cloud_provider is defined and cloud_provider == 'azure'
  tags: [cloud-provider, azure, facts]

- name: Enable ip forwarding
  lineinfile:
    dest: /etc/sysctl.d/99-sysctl.conf
    regexp: '^net.ipv4.ip_forward='
    line: 'net.ipv4.ip_forward=1'
    state: present
    create: yes
    backup: yes
    validate: 'sysctl -f %s'
  tags: bootstrap-os

- name: Create cni directories
  file:
    path: "{{ item }}"
@@ -122,8 +106,7 @@

- name: Install epel-release on RedHat/CentOS
  shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
  when: ansible_distribution in ["CentOS","RedHat"] and
        ansible_distribution_major_version >= 7
  when: ansible_distribution in ["CentOS","RedHat"]
  changed_when: False
  tags: bootstrap-os
@@ -137,16 +120,7 @@
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
  when: ansible_os_family != "CoreOS"
  tags: bootstrap-os

- name: Disable IPv6 DNS lookup
  lineinfile:
    dest: /etc/gai.conf
    line: "precedence ::ffff:0:0/96 100"
    state: present
    backup: yes
  when: disable_ipv6_dns and ansible_os_family != "CoreOS"
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: bootstrap-os

# Todo : selinux configuration
@@ -156,13 +130,33 @@
  changed_when: False
  tags: bootstrap-os

- name: Disable IPv6 DNS lookup
  lineinfile:
    dest: /etc/gai.conf
    line: "precedence ::ffff:0:0/96 100"
    state: present
    backup: yes
  when: disable_ipv6_dns and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: bootstrap-os

- name: Enable ip forwarding
  lineinfile:
    dest: /etc/sysctl.d/99-sysctl.conf
    regexp: '^net.ipv4.ip_forward='
    line: 'net.ipv4.ip_forward=1'
    state: present
    create: yes
    backup: yes
    validate: 'sysctl -f %s'
  tags: bootstrap-os

- name: Write openstack cloud-config
  template:
    src: openstack-cloud-config.j2
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: cloud_provider is defined and cloud_provider == "openstack"
  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "openstack"
  tags: [cloud-provider, openstack]

- name: Write azure cloud-config
@@ -171,15 +165,24 @@
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: cloud_provider is defined and cloud_provider == "azure"
  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "azure"
  tags: [cloud-provider, azure]

- include: etchosts.yml
  tags: [bootstrap-os, etchosts]

- include: resolvconf.yml
  when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
  tags: [bootstrap-os, resolvconf]

- include: dhclient-hooks.yml
  when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: [bootstrap-os, resolvconf]

- include: dhclient-hooks-undo.yml
  when: dns_mode != 'none' and resolvconf_mode != 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: [bootstrap-os, resolvconf]

- name: Check if we are running inside an Azure VM
  stat: path=/var/lib/waagent/
  register: azure_check
@@ -187,7 +190,6 @@

- include: growpart-azure-centos-7.yml
  when: azure_check.stat.exists and
        ansible_distribution in ["CentOS","RedHat"] and
        ansible_distribution_major_version >= 7
        ansible_distribution in ["CentOS","RedHat"]
  tags: bootstrap-os
roles/kubernetes/preinstall/tasks/pre-upgrade.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
- name: Stop if non systemd OS type
  assert:
    that: ansible_service_mgr == "systemd"
@@ -1,7 +1,7 @@
---
- name: create temporary resolveconf cloud init file
  command: cp -f /etc/resolv.conf "{{ resolvconffile }}"
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: Remove search/domain/nameserver options
  lineinfile:
@@ -48,7 +48,7 @@
- name: get temporary resolveconf cloud init file content
  command: cat {{ resolvconffile }}
  register: cloud_config
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: persist resolvconf cloud init file
  template:
@@ -56,9 +56,5 @@
    src: resolvconf.j2
    owner: root
    mode: 0644
  notify: Preinstall | update resolvconf for CoreOS
  when: ansible_os_family == "CoreOS"

- include: dhclient-hooks.yml
  when: ansible_os_family != "CoreOS"
  tags: [bootstrap-os, resolvconf]
  notify: Preinstall | update resolvconf for Container Linux by CoreOS
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
@@ -10,7 +10,7 @@
- set_fact:
    kube_apiserver_endpoint: |-
      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
      https://localhost:{{ kube_apiserver_port }}
      https://localhost:{{ nginx_kube_apiserver_port }}
      {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
      http://127.0.0.1:{{ kube_apiserver_insecure_port }}
      {%- else -%}
@@ -17,16 +17,16 @@
  default_resolver: >-
    {%- if cloud_provider is defined and cloud_provider == 'gce' -%}169.254.169.254{%- else -%}8.8.8.8{%- endif -%}

- name: check kubelet
- name: check if kubelet is configured
  stat:
    path: "{{ bin_dir }}/kubelet"
  register: kubelet
    path: "{{ kube_config_dir }}/kubelet.env"
  register: kubelet_configured
  changed_when: false

- name: check if early DNS configuration stage
  set_fact:
    dns_early: >-
      {%- if kubelet.stat.exists -%}false{%- else -%}true{%- endif -%}
      {%- if kubelet_configured.stat.exists -%}false{%- else -%}true{%- endif -%}

- name: target resolv.conf files
  set_fact:
@@ -35,21 +35,37 @@
      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%}
    head: >-
      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
  when: ansible_os_family != "CoreOS"
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: target temporary resolvconf cloud init file (CoreOS)
- name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
  when: ansible_os_family == "CoreOS"
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: target dhclient conf/hook files for Red Hat family
- name: check if /etc/dhclient.conf exists
  stat: path=/etc/dhclient.conf
  register: dhclient_stat

- name: target dhclient conf file for /etc/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhclient.conf
  when: dhclient_stat.stat.exists

- name: check if /etc/dhcp/dhclient.conf exists
  stat: path=/etc/dhcp/dhclient.conf
  register: dhcp_dhclient_stat

- name: target dhclient conf file for /etc/dhcp/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhcp/dhclient.conf
  when: dhcp_dhclient_stat.stat.exists

- name: target dhclient hook file for Red Hat family
  set_fact:
    dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh
  when: ansible_os_family == "RedHat"

- name: target dhclient conf/hook files for Debian family
- name: target dhclient hook file for Debian family
  set_fact:
    dhclientconffile: /etc/dhcp/dhclient.conf
    dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
  when: ansible_os_family == "Debian"

@@ -67,7 +83,7 @@
- name: pick dnsmasq cluster IP or default resolver
  set_fact:
    dnsmasq_server: |-
      {%- if skip_dnsmasq|bool and not dns_early|bool -%}
      {%- if dns_mode == 'kubedns' and not dns_early|bool -%}
      {{ [ skydns_server ] + upstream_dns_servers|default([]) }}
      {%- elif dns_early|bool -%}
      {{ upstream_dns_servers|default([]) }}
@@ -27,8 +27,11 @@ Usage : $(basename $0) -f <config> [-d <ssldir>]
|
||||
-f | --config : Openssl configuration file
|
||||
-d | --ssldir : Directory where the certificates will be installed
|
||||
|
||||
ex :
|
||||
$(basename $0) -f openssl.conf -d /srv/ssl
|
||||
Environmental variables MASTERS and HOSTS should be set to generate keys
|
||||
for each host.
|
||||
|
||||
ex :
|
||||
MASTERS=node1 HOSTS="node1 node2" $(basename $0) -f openssl.conf -d /srv/ssl
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -61,21 +64,42 @@ cd "${tmpdir}"
 mkdir -p "${SSLDIR}"

 # Root CA
-openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
-openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+if [ -e "$SSLDIR/ca-key.pem" ]; then
+    # Reuse existing CA
+    cp $SSLDIR/{ca.pem,ca-key.pem} .
+else
+    openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+fi

 # Apiserver
-openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
-openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
-openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
-cat ca.pem >> apiserver.pem
+if [ ! -e "$SSLDIR/ca-key.pem" ]; then
+    # kube-apiserver key
+    openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
+    openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+    cat ca.pem >> apiserver.pem
+fi
+
+if [ -n "$MASTERS" ]; then
+    for host in $MASTERS; do
+        cn="${host%%.*}"
+        # admin key
+        openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
+        openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}" > /dev/null 2>&1
+        openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 365 > /dev/null 2>&1
+    done
+fi

 # Nodes and Admin
-for i in node admin; do
-    openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
-    openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
-    openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
-done
+if [ -n "$HOSTS" ]; then
+    for host in $HOSTS; do
+        cn="${host%%.*}"
+        # node key
+        openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
+        openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1
+        openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 365 > /dev/null 2>&1
+    done
+fi

 # Install certs
 mv *.pem ${SSLDIR}/

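Two bash idioms in the new script are worth spelling out: ${host%%.*} strips the longest ".*" suffix to turn an FQDN into a short CN, and the CA-reuse branch relies on brace expansion. A minimal sketch, with an example hostname that is not from the diff:

# ${host%%.*} keeps only the first DNS label, so FQDNs get short CNs:
host="node1.example.com"
cn="${host%%.*}"                 # -> "node1"
echo "admin-${host}.pem will carry CN=kube-admin-${cn}"

# Brace expansion copies both CA files in one command (bash, not POSIX sh):
#   cp "$SSLDIR"/{ca.pem,ca-key.pem} .
# expands to
#   cp "$SSLDIR/ca.pem" "$SSLDIR/ca-key.pem" .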
@@ -1,36 +1,49 @@
 ---
 - name: "Check_certs | check if the certs have already been generated on first master"
   stat:
-    path: "{{ kube_cert_dir }}/ca.pem"
+    path: "{{ kube_cert_dir }}/{{ item }}"
   delegate_to: "{{groups['kube-master'][0]}}"
   register: kubecert_master
   run_once: true
+  with_items: >-
+    ['ca.pem',
+    {% for host in groups['k8s-cluster'] %}
+    'node-{{ host }}-key.pem'
+    {% if not loop.last %}{{','}}{% endif %}
+    {% endfor %}]

-- name: "Check_certs | Set default value for 'sync_certs' and 'gen_certs' to false"
+- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
   set_fact:
     sync_certs: false
     gen_certs: false
+    secret_changed: false

-- name: "Check_certs | Set 'sync_certs' and 'gen_certs' to true"
+- name: "Check_certs | Set 'gen_certs' to true"
   set_fact:
     gen_certs: true
-  when: not kubecert_master.stat.exists
+  when: "not {{ item.stat.exists }}"
   run_once: true
+  with_items: "{{ kubecert_master.results }}"

 - name: "Check certs | check if a cert already exists"
   stat:
-    path: "{{ kube_cert_dir }}/ca.pem"
+    path: "{{ kube_cert_dir }}/{{ item }}"
   register: kubecert
+  with_items:
+    - ca.pem
+    - node-{{ inventory_hostname }}-key.pem

 - name: "Check_certs | Set 'sync_certs' to true"
   set_fact:
     sync_certs: true
   when: >-
     {%- set certs = {'sync': False} -%}
-    {%- for server in play_hosts
-      if (not hostvars[server].kubecert.stat.exists|default(False)) or
-      (hostvars[server].kubecert.stat.checksum|default('') != kubecert_master.stat.checksum|default('')) -%}
-    {%- set _ = certs.update({'sync': True}) -%}
+    {%- for host in groups['k8s-cluster'] %}
+    {% if host == inventory_hostname %}
+    {% if (not kubecert.results[0].stat.exists|default(False)) or
+      (kubecert.results[1].stat.checksum|default('') != kubecert_master.results[loop.index].stat.checksum|default('')) -%}
+    {%- set _ = certs.update({'sync': True}) -%}
+    {% endif %}
+    {% endif %}
     {%- endfor -%}
     {{ certs.sync }}
   run_once: true

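In the rewritten test, results[0] is the stat of ca.pem and results[1] is the stat of this host's node key; the host syncs when its key is missing or its checksum differs from the copy on the first master. A rough shell analogue, with an assumed cert path (Ansible's stat module reports a SHA-1 checksum by default):

# Sketch of the sync_certs decision for one host (path is an example):
cert=/etc/kubernetes/ssl/node-node1-key.pem
master_sum="<checksum of $cert as gathered on the first master>"
local_sum=$(sha1sum "$cert" | cut -d' ' -f1)
if [ ! -e "$cert" ] || [ "$local_sum" != "$master_sum" ]; then
    sync_certs=true
fi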
@@ -1,4 +1,24 @@
 ---
+- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
+  file:
+    path: "{{ kube_config_dir }}"
+    state: directory
+    owner: kube
+  run_once: yes
+  delegate_to: "{{groups['kube-master'][0]}}"
+  tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
+  when: gen_certs|default(false)
+
+- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
+  file:
+    path: "{{ kube_script_dir }}"
+    state: directory
+    owner: kube
+  run_once: yes
+  delegate_to: "{{groups['kube-master'][0]}}"
+  tags: [k8s-secrets, bootstrap-os]
+  when: gen_certs|default(false)
+
 - name: Gen_certs | write openssl config
   template:
     src: "openssl.conf.j2"
@@ -18,42 +38,103 @@

 - name: Gen_certs | run cert generation script
   command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl.conf -d {{ kube_cert_dir }}"
+  environment:
+    - MASTERS: "{% for m in groups['kube-master'] %}
+        {% if hostvars[m].sync_certs|default(true) %}
+        {{ m }}
+        {% endif %}
+        {% endfor %}"
+    - HOSTS: "{% for h in groups['k8s-cluster'] %}
+        {% if hostvars[h].sync_certs|default(true) %}
+        {{ h }}
+        {% endif %}
+        {% endfor %}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_certs|default(false)
   notify: set secret_changed

 - set_fact:
-    master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
-    node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
+    all_master_certs: "['ca-key.pem',
+      {% for node in groups['kube-master'] %}
+      'admin-{{ node }}.pem',
+      'admin-{{ node }}-key.pem',
+      'apiserver.pem',
+      'apiserver-key.pem',
+      {% endfor %}]"
+    my_master_certs: ['ca-key.pem',
+      'admin-{{ inventory_hostname }}.pem',
+      'admin-{{ inventory_hostname }}-key.pem',
+      'apiserver.pem',
+      'apiserver-key.pem'
+    ]
+    all_node_certs: "['ca.pem',
+      {% for node in groups['k8s-cluster'] %}
+      'node-{{ node }}.pem',
+      'node-{{ node }}-key.pem',
+      {% endfor %}]"
+    my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
   tags: facts

 - name: Gen_certs | Gather master certs
-  shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
+  args:
+    executable: /bin/bash
   register: master_cert_data
   delegate_to: "{{groups['kube-master'][0]}}"
-  run_once: true
-  when: sync_certs|default(false)
+  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]

 - name: Gen_certs | Gather node certs
-  shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_node_certs|join(' ') }} | base64 --wrap=0"
+  args:
+    executable: /bin/bash
   register: node_cert_data
   delegate_to: "{{groups['kube-master'][0]}}"
-  run_once: true
-  when: sync_certs|default(false)
+  when: inventory_hostname in groups['kube-node'] and
+        sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]

-- name: Gen_certs | Copy certs on masters
-  shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
+#char limit when using shell command
+
+#FIXME(mattymo): Use tempfile module in ansible 2.3
+- name: Gen_certs | Prepare tempfile for unpacking certs
+  shell: mktemp /tmp/certsXXXXX.tar.gz
+  register: cert_tempfile
+
+- name: Gen_certs | Write master certs to tempfile
+  copy:
+    content: "{{master_cert_data.stdout}}"
+    dest: "{{cert_tempfile.stdout}}"
+    owner: root
+    mode: "0600"
+  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_certs | Unpack certs on masters
+  shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}"
+  changed_when: false
+  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]
+  notify: set secret_changed
+
+- name: Gen_certs | Cleanup tempfile
+  file:
+    path: "{{cert_tempfile.stdout}}"
+    state: absent
+  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]

 - name: Gen_certs | Copy certs on nodes
-  shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+  shell: "base64 -d <<< '{{node_cert_data.stdout|quote}}' | tar xz -C {{ kube_cert_dir }}"
+  args:
+    executable: /bin/bash
   changed_when: false
+  when: inventory_hostname in groups['kube-node'] and
+        sync_certs|default(false) and
+        inventory_hostname != groups['kube-master'][0]
   notify: set secret_changed

 - name: Gen_certs | check certificate permissions
   file:
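The pack/unpack pipeline is easier to see outside of Jinja. The tasks inline the file list through a bash herestring into tar's -T /dev/stdin (hence executable: /bin/bash); the sketch below does the same round trip with a newline-separated list, one name per line, and example file names and directories:

# Pack selected certs into a base64 string (what the "Gather ... certs" tasks do):
packed=$(printf '%s\n' ca.pem node-node1.pem node-node1-key.pem \
  | tar cfz - -C /etc/kubernetes/ssl -T /dev/stdin \
  | base64 --wrap=0)

# Unpack on the target (what "Unpack certs on masters" / "Copy certs on nodes" do):
mkdir -p /tmp/restored
base64 -d <<< "$packed" | tar xz -C /tmp/restored

The tempfile detour for masters exists only because, per the NOTE above, pushing a ~200k-character blob through a single shell command line is fragile; writing the base64 text to a file with copy and decoding from that file sidesteps the limit.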
@@ -74,11 +155,25 @@
       /usr/local/share/ca-certificates/kube-ca.crt
       {%- elif ansible_os_family == "RedHat" -%}
       /etc/pki/ca-trust/source/anchors/kube-ca.crt
-      {%- elif ansible_os_family == "CoreOS" -%}
+      {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
       /etc/ssl/certs/kube-ca.pem
       {%- endif %}
   tags: facts

+- name: SSL CA directories | Set SSL CA directories
+  set_fact:
+    ssl_ca_dirs: "[
+      {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
+      '/usr/share/ca-certificates',
+      {% elif ansible_os_family == 'RedHat' -%}
+      '/etc/pki/tls',
+      '/etc/pki/ca-trust',
+      {% elif ansible_os_family == 'Debian' -%}
+      '/usr/share/ca-certificates',
+      {% endif -%}
+    ]"
+  tags: facts
+
 - name: Gen_certs | add CA to trusted CA dir
   copy:
     src: "{{ kube_cert_dir }}/ca.pem"
@@ -86,9 +181,9 @@
     remote_src: true
   register: kube_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
   command: update-ca-certificates
-  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS"]
+  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "Container Linux by CoreOS"]

 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract

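Done by hand, the trust update amounts to the following; the destination paths come from the facts above, while the source path assumes the common kube_cert_dir of /etc/kubernetes/ssl:

# Debian/Ubuntu: ca.pem is installed under a .crt name, then the store is rebuilt:
cp /etc/kubernetes/ssl/ca.pem /usr/local/share/ca-certificates/kube-ca.crt
update-ca-certificates
# (Container Linux uses /etc/ssl/certs/kube-ca.pem with the same command.)

# RedHat family:
cp /etc/kubernetes/ssl/ca.pem /etc/pki/ca-trust/source/anchors/kube-ca.crt
update-ca-trust extract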
@@ -35,6 +35,41 @@
   when: inventory_hostname in "{{ groups['kube-master'] }}"
   notify: set secret_changed

+#
+# The following directory-creation tasks make sure that the directories
+# exist on the first master for cases where the first master isn't
+# being run.
+#
+- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
+  file:
+    path: "{{ kube_config_dir }}"
+    state: directory
+    owner: kube
+  run_once: yes
+  delegate_to: "{{groups['kube-master'][0]}}"
+  tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
+  when: gen_certs|default(false) or gen_tokens|default(false)
+
+- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
+  file:
+    path: "{{ kube_script_dir }}"
+    state: directory
+    owner: kube
+  run_once: yes
+  delegate_to: "{{groups['kube-master'][0]}}"
+  tags: [k8s-secrets, bootstrap-os]
+  when: gen_certs|default(false) or gen_tokens|default(false)
+
+- name: "Get_tokens | Make sure the tokens directory exists (on {{groups['kube-master'][0]}})"
+  file:
+    path={{ kube_token_dir }}
+    state=directory
+    mode=o-rwx
+    group={{ kube_cert_group }}
+  run_once: yes
+  delegate_to: "{{groups['kube-master'][0]}}"
+  when: gen_tokens|default(false)
+
 - include: gen_certs.yml
   tags: k8s-secrets
 - include: gen_tokens.yml

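A shell equivalent of the tokens-directory task, assuming kubespray's usual defaults for kube_token_dir (/etc/kubernetes/tokens) and kube_cert_group (kube-cert); both are variables, so actual values may differ:

# mode=o-rwx only strips the "other" permission bits; owner/group bits stay as-is:
mkdir -p /etc/kubernetes/tokens
chgrp kube-cert /etc/kubernetes/tokens
chmod o-rwx /etc/kubernetes/tokens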
@@ -21,10 +21,6 @@ global_as_num: "64512"
 # calico_mtu: 1500

 # Limits for apps
-calico_rr_memory_limit: 1000M
-calico_rr_cpu_limit: 300m
-calico_rr_memory_requests: 500M
-calico_rr_cpu_requests: 150m
 calico_node_memory_limit: 500M
 calico_node_cpu_limit: 300m
 calico_node_memory_requests: 256M

@@ -7,7 +7,6 @@

 - name : Calico | reload systemd
   shell: systemctl daemon-reload
-  when: ansible_service_mgr == "systemd"

 - name: Calico | reload calico-node
   service:

@@ -5,3 +5,9 @@ global_as_num: "64512"

 calico_cert_dir: /etc/calico/certs
 etcd_cert_dir: /etc/ssl/etcd/ssl
+
+# Limits for apps
+calico_rr_memory_limit: 1000M
+calico_rr_cpu_limit: 300m
+calico_rr_memory_requests: 500M
+calico_rr_cpu_requests: 150m

@@ -7,7 +7,6 @@

 - name : Calico-rr | reload systemd
   shell: systemctl daemon-reload
-  when: ansible_service_mgr == "systemd"

 - name: Calico-rr | reload calico-rr
   service:

Some files were not shown because too many files have changed in this diff.