Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: v2.1.0 ... test-tag-1 (120 commits)
Commits (SHA1):

a222be7fae 9d43cd86be 6ed99f1f44 be1e1b41bd fd30131dc2 b7bf502e02
3f70e3a843 cae2982d81 b638c89556 9bc51bd0e2 408b4f3f42 d818ac1d59
bd1c764a1a 8f377ad8bd 97dabbe997 5a7a3f6d4a b4327fdc99 10f924a617
3dd6a01c8b 585afef945 bdc65990e1 f2e4ffcac2 ae66b6e648 923057c1a8
0f6e08d34f 4889a3e2e1 39d87a96aa e7c03ba66a 08822ec684 6463a01e04
0cf1850465 1418fb394b e4eda88ca9 71a3c97d6f 1c3d2924ae a11b9d28bd
b54eb609bf dc8ff413f9 f8ffa1601d da01bc1fbb a2079a9ca9 a627299468
e5fdc63bdd fe83e70074 46c177b982 1df50adc1c b6cd9a4c4b 2333ec4d1f
85a8a54d3e 7294a22901 f4b7474ade 9428321607 882544446a 73160c9b90
2184d6a3ff 6e35895b44 8009ff8537 9bf792ce0b f05aaeb329 1bdf34e7dc
cd25bfca91 1b621ab81c cb2e5ac776 8ce32eb3e1 aae0314bda 35d5248d41
0ccc2555d3 b26a711e96 2218a052b2 40f419ca54 f742fc3dd1 33fbcc56d6
61d05dea58 8a821060a3 0d44599a63 8e29b08070 b6c3e61603 dc08b75c6a
5420fa942e 1ee33d3a8d 61dab8dc0b 0022a2b29e b2a27ed089 d8ae50800a
43fa72b7b7 36b62b7270 73204c868d 2ee889843a 74b78e75a1 6905edbeb6
6c69da1573 e776dfd800 95bf380d07 2a61ad1b57 80703010bd e88c10670e
2a2953c674 1054f37765 f77257cf79 f004cc07df 065a4da72d 98c7f2eb13
d332502d3d a7bf7867d7 c63cda7c21 caab0cdf27 1191876ae8 fa51a589ef
3f274115b0 3b0918981e a327dfeed7 d8cef34d6c 6fb6947feb db8173da28
bcdfb3cfb0 79aeb10431 e22f938ae5 cf042b2a4c 65c86377fc 8e4e3998dd
.gitignore (vendored): 1 change

@@ -10,4 +10,5 @@ temp
 *.pyo
 *.tfstate
 *.tfstate.backup
 **/*.sw[pon]
+/ssh-bastion.conf
.gitlab-ci.yml: 136 changes

@@ -1,4 +1,5 @@
 stages:
+  - moderator
   - unit-tests
   - deploy-gce-part1
   - deploy-gce-part2
@@ -17,7 +18,7 @@ variables:
   # us-west1-a

 before_script:
-  - pip install ansible
+  - pip install ansible==2.2.1.0
  - pip install netaddr
  - pip install apache-libcloud==0.20.1
  - pip install boto==2.9.0
@@ -48,12 +49,14 @@ before_script:
    GS_SECRET_ACCESS_KEY: $GS_SECRET
    ANSIBLE_KEEP_REMOTE_FILES: "1"
    BOOTSTRAP_OS: none
+   IDEMPOT_CHECK: "false"
    RESOLVCONF_MODE: docker_dns
    LOG_LEVEL: "-vv"
    ETCD_DEPLOYMENT: "docker"
    KUBELET_DEPLOYMENT: "docker"
+   WEAVE_CPU_LIMIT: "100m"
+   MAGIC: "ci check this"

 .gce: &gce
   <<: *job
   <<: *docker_service
@@ -62,10 +65,9 @@ before_script:
     paths:
       - downloads/
       - $HOME/.cache
-  stage: deploy-gce
   before_script:
     - docker info
-    - pip install ansible==2.1.3.0
+    - pip install ansible==2.2.1.0
     - pip install netaddr
     - pip install apache-libcloud==0.20.1
     - pip install boto==2.9.0
@@ -84,31 +86,37 @@ before_script:
    - ls
    - echo ${PWD}
    - >
-      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-      -e mode=${CLUSTER_MODE}
-      -e test_id=${TEST_ID}
-      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
+      ${LOG_LEVEL}
-      -e cloud_image=${CLOUD_IMAGE}
-      -e cloud_region=${CLOUD_REGION}
-      -e gce_credentials_file=${HOME}/.ssh/gce.json
       -e gce_project_id=${GCE_PROJECT_ID}
       -e gce_service_account_email=${GCE_ACCOUNT}
+      -e gce_credentials_file=${HOME}/.ssh/gce.json
+      -e cloud_image=${CLOUD_IMAGE}
+      -e inventory_path=${PWD}/inventory/inventory.ini
+      -e cloud_region=${CLOUD_REGION}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e mode=${CLUSTER_MODE}
+      -e test_id=${TEST_ID}

    # Create cluster
    - >
-      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-      --private-key=${HOME}/.ssh/id_rsa
-      -e bootstrap_os=${BOOTSTRAP_OS}
+      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
+      ${SSH_ARGS}
+      ${LOG_LEVEL}
       -e ansible_python_interpreter=${PYPATH}
-      -e download_run_once=true
-      -e download_localhost=true
+      -e ansible_ssh_user=${SSH_USER}
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e cloud_provider=gce
       -e deploy_netchecker=true
-      -e resolvconf_mode=${RESOLVCONF_MODE}
-      -e local_release_dir=${PWD}/downloads
+      -e download_localhost=true
+      -e download_run_once=true
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
       cluster.yml

@@ -122,6 +130,69 @@ before_script:
     ## Advanced DNS checks
     - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL

+    ## Idempotency checks 1/5 (repeat deployment)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e download_run_once=true
+      -e download_localhost=true
+      -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e local_release_dir=${PWD}/downloads
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      cluster.yml;
+      fi
+
+    ## Idempotency checks 2/5 (Advanced DNS checks)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
+      fi
+
+    ## Idempotency checks 3/5 (reset deployment)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      reset.yml;
+      fi
+
+    ## Idempotency checks 4/5 (redeploy after reset)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e download_run_once=true
+      -e download_localhost=true
+      -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e local_release_dir=${PWD}/downloads
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      cluster.yml;
+      fi
+
+    ## Idempotency checks 5/5 (Advanced DNS checks)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
+      fi
+
   after_script:
     - >
       ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
@@ -139,9 +210,9 @@ before_script:
 .coreos_calico_sep_variables: &coreos_calico_sep_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: calico
-  CLOUD_IMAGE: coreos-stable
+  CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
   CLOUD_REGION: us-west1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
   BOOTSTRAP_OS: coreos
   RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

@@ -176,25 +247,27 @@ before_script:
 .coreos_canal_variables: &coreos_canal_variables
 # stage: deploy-gce-part2
   KUBE_NETWORK_PLUGIN: canal
-  CLOUD_IMAGE: coreos-stable
+  CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
   CLOUD_REGION: us-east1-b
   CLUSTER_MODE: default
   BOOTSTRAP_OS: coreos
   RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  IDEMPOT_CHECK: "true"

 .rhel7_canal_sep_variables: &rhel7_canal_sep_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: canal
   CLOUD_IMAGE: rhel-7
   CLOUD_REGION: us-east1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate

 .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: weave
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: us-central1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
+  IDEMPOT_CHECK: "false"

 .centos7_calico_ha_variables: &centos7_calico_ha_variables
 # stage: deploy-gce-special
@@ -202,6 +275,7 @@ before_script:
   CLOUD_IMAGE: centos-7
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: ha
+  IDEMPOT_CHECK: "true"

 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
 # stage: deploy-gce-special
@@ -216,7 +290,7 @@ before_script:
   KUBE_NETWORK_PLUGIN: flannel
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: us-central1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
   ETCD_DEPLOYMENT: rkt
   KUBELET_DEPLOYMENT: rkt

@@ -435,15 +509,21 @@ ubuntu-rkt-sep:
   only: ['master', /^pr-.*$/]

 # Premoderated with manual actions
-syntax-check:
+ci-authorized:
   <<: *job
-  stage: unit-tests
+  stage: moderator
+  before_script:
+    - apt-get -y install jq
   script:
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
+    - /bin/sh scripts/premoderator.sh
   except: ['triggers', 'master']
+
+syntax-check:
+  <<: *job
+  stage: unit-tests
+  script:
+    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
+  except: ['triggers', 'master']

 tox-inventory-builder:
   stage: unit-tests
RELEASE.md: 34 changes

@@ -7,3 +7,37 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
 4. The release issue is closed
 5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
+
+## Major/minor releases, merge freezes and milestones
+
+* Kargo does not maintain stable branches for releases. Releases are tags, not
+  branches, and there are no backports. Therefore, there is no need for merge
+  freezes as well.
+
+* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
+  via maintenance releases (vX.Y.Z) and assigned to the corresponding open
+  milestone (vX.Y). That milestone remains open for the major/minor releases
+  support lifetime, which ends once the milestone closed. Then only a next major
+  or minor release can be done.
+
+* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
+  version numbers and other components' arbitrary versions, like etcd or network plugins.
+  Older or newer versions are not supported and not tested for the given release.
+
+* There is no unstable releases and no APIs, thus Kargo doesn't follow
+  [semver](http://semver.org/). Every version describes only a stable release.
+  Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
+  playbooks, shall be described in the release notes. Other breaking changes, if any in
+  the contributed addons or bound versions of Kubernetes and other components, are
+  considered out of Kargo scope and are up to the components' teams to deal with and
+  document.
+
+* Minor releases can change components' versions, but not the major ``kube_version``.
+  Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
+  is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
+  then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
+  and *any* changes to other components, like etcd v4, or calico 1.2.3.
+  And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
+foo
+foo
+foo
ansible.cfg

@@ -7,3 +7,5 @@ host_key_checking=False
 gathering = smart
 fact_caching = jsonfile
 fact_caching_connection = /tmp
+stdout_callback = skippy
+library = ./library
cluster.yml

@@ -46,7 +46,6 @@
   any_errors_fatal: true
   roles:
     - { role: kubernetes/master, tags: master }
-    - { role: kubernetes-apps/lib, tags: apps }
     - { role: kubernetes-apps/network_plugin, tags: network }

 - hosts: calico-rr
@@ -63,5 +62,4 @@
 - hosts: kube-master[0]
   any_errors_fatal: true
   roles:
-    - { role: kubernetes-apps/lib, tags: apps }
     - { role: kubernetes-apps, tags: apps }
contrib/inventory_builder/inventory.py

@@ -40,7 +40,8 @@ import os
 import re
 import sys

-ROLES = ['kube-master', 'all', 'k8s-cluster:children', 'kube-node', 'etcd']
+ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
+         'calico-rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
 _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
@@ -51,10 +52,18 @@ def get_var_as_bool(name, default):
     value = os.environ.get(name, '')
     return _boolean_states.get(value.lower(), default)

+# Configurable as shell vars start
+
 CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
+# Reconfigures cluster distribution at scale
+SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
+MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
+
 DEBUG = get_var_as_bool("DEBUG", True)
 HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")

+# Configurable as shell vars end
+

 class KargoInventory(object):

@@ -74,11 +83,16 @@ class KargoInventory(object):
         if changed_hosts:
             self.hosts = self.build_hostnames(changed_hosts)
             self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
-            self.set_kube_master(list(self.hosts.keys())[:2])
             self.set_all(self.hosts)
             self.set_k8s_cluster()
-            self.set_kube_node(self.hosts.keys())
             self.set_etcd(list(self.hosts.keys())[:3])
+            if len(self.hosts) >= SCALE_THRESHOLD:
+                self.set_kube_master(list(self.hosts.keys())[3:5])
+            else:
+                self.set_kube_master(list(self.hosts.keys())[:2])
+            self.set_kube_node(self.hosts.keys())
+            if len(self.hosts) >= SCALE_THRESHOLD:
+                self.set_calico_rr(list(self.hosts.keys())[:3])
         else:  # Show help if no options
             self.show_help()
             sys.exit(0)
@@ -205,8 +219,32 @@ class KargoInventory(object):
         self.add_host_to_group('k8s-cluster:children', 'kube-node')
         self.add_host_to_group('k8s-cluster:children', 'kube-master')

+    def set_calico_rr(self, hosts):
+        for host in hosts:
+            if host in self.config.items('kube-master'):
+                self.debug("Not adding {0} to calico-rr group because it "
+                           "conflicts with kube-master group".format(host))
+                continue
+            if host in self.config.items('kube-node'):
+                self.debug("Not adding {0} to calico-rr group because it "
+                           "conflicts with kube-node group".format(host))
+                continue
+            self.add_host_to_group('calico-rr', host)
+
     def set_kube_node(self, hosts):
         for host in hosts:
+            if len(self.config['all']) >= SCALE_THRESHOLD:
+                if self.config.has_option('etcd', host):
+                    self.debug("Not adding {0} to kube-node group because of "
+                               "scale deployment and host is in etcd "
+                               "group.".format(host))
+                    continue
+            if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
+                if self.config.has_option('kube-master', host):
+                    self.debug("Not adding {0} to kube-node group because of "
+                               "scale deployment and host is in kube-master "
+                               "group.".format(host))
+                    continue
             self.add_host_to_group('kube-node', host)

     def set_etcd(self, hosts):
@@ -275,7 +313,15 @@ print_ips - Write a space-delimited list of IPs from "all" group
 Advanced usage:
 Add another host after initial creation: inventory.py 10.10.1.5
 Delete a host: inventory.py -10.10.1.3
-Delete a host by id: inventory.py -node1'''
+Delete a host by id: inventory.py -node1
+
+Configurable env vars:
+DEBUG                   Enable debug printing. Default: True
+CONFIG_FILE             File to write config to Default: ./inventory.cfg
+HOST_PREFIX             Host prefix for generated hosts. Default: node
+SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
+MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
+'''
         print(help_text)

     def print_config(self):
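The scale thresholds above are driven entirely by environment variables, so the new layout can be exercised without editing the script. A minimal sketch, assuming Python 3 on PATH and hypothetical IPs; note that, as committed, `MASSIVE_SCALE_THRESHOLD` also reads the `SCALE_THRESHOLD` environment variable, so it cannot be overridden independently of the smaller threshold:

```
# Default thresholds: five hosts stay below SCALE_THRESHOLD=50, so the
# etcd, kube-master and kube-node groups overlap as before.
CONFIG_FILE=./inventory.cfg python3 contrib/inventory_builder/inventory.py \
  10.10.1.3 10.10.1.4 10.10.1.5 10.10.1.6 10.10.1.7

# Lowering the threshold forces the scale layout on the same five hosts:
# the first three become etcd (and calico-rr candidates), hosts four and
# five become kube-master, and etcd hosts are kept out of kube-node.
SCALE_THRESHOLD=5 CONFIG_FILE=./inventory.cfg \
  python3 contrib/inventory_builder/inventory.py \
  10.10.1.3 10.10.1.4 10.10.1.5 10.10.1.6 10.10.1.7

# Inspect the generated groups.
CONFIG_FILE=./inventory.cfg python3 contrib/inventory_builder/inventory.py print_cfg
```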
@@ -1,48 +0,0 @@
----
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-common.git
-  path: roles/apps
-  scm: git
-
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-dashboard.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
-#  path: roles/apps
-#  scm: git
-#
-#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
-#  path: roles/apps
-#  scm: git
contrib/inventory_builder/tests/test_inventory.py

@@ -210,3 +210,31 @@ class TestInventory(unittest.TestCase):
         self.inv.set_etcd([host])
         self.assertTrue(host in self.inv.config[group])
+
+    def test_scale_scenario_one(self):
+        num_nodes = 50
+        hosts = OrderedDict()
+
+        for hostid in range(1, num_nodes+1):
+            hosts["node" + str(hostid)] = ""
+
+        self.inv.set_all(hosts)
+        self.inv.set_etcd(hosts.keys()[0:3])
+        self.inv.set_kube_master(hosts.keys()[0:2])
+        self.inv.set_kube_node(hosts.keys())
+        for h in range(3):
+            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
+
+    def test_scale_scenario_two(self):
+        num_nodes = 500
+        hosts = OrderedDict()
+
+        for hostid in range(1, num_nodes+1):
+            hosts["node" + str(hostid)] = ""
+
+        self.inv.set_all(hosts)
+        self.inv.set_etcd(hosts.keys()[0:3])
+        self.inv.set_kube_master(hosts.keys()[3:5])
+        self.inv.set_kube_node(hosts.keys())
+        for h in range(5):
+            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
tox.ini

@@ -11,7 +11,7 @@ deps =
     -r{toxinidir}/test-requirements.txt
 setenv = VIRTUAL_ENV={envdir}
 passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
-commands = py.test -vv #{posargs:./tests}
+commands = pytest -vv #{posargs:./tests}

 [testenv:pep8]
 usedevelop = False
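The renamed runner can also be invoked locally before pushing; a sketch, assuming tox is installed and the command is run from the directory containing this tox.ini and its tests/:

```
# Run everything tox defines (the unit tests plus the pep8 env shown above).
pip install tox
tox

# Or call the renamed entry point directly, mirroring the new commands= line.
pytest -vv tests/
```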
@@ -1 +0,0 @@
-../../../../../roles/kubernetes-apps/lib
@@ -30,7 +30,7 @@ requirements.

 #### OpenStack

-Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
+Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:

 ```
 $ source ~/.stackrc
contrib/terraform/openstack/group_vars/all.yml: replaced by a symbolic link

@@ -1,165 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, none
-bootstrap_os: none
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# Uncomment this line for Container Linux by CoreOS only.
-# Directory where python binary is installed
-# ansible_python_interpreter: "/opt/bin/python"
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-kube_api_pwd: "changeme"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-  root:
-    pass: "changeme"
-    role: admin
-
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf
-ndots: 5
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-
-# For some environments, each node has a pubilcally accessible
-# address and an address it should bind services to. These are
-# really inventory level variables, but described here for consistency.
-#
-# When advertising access, the access_ip will be used, but will defer to
-# ip and then the default ansible ip when unspecified.
-#
-# When binding to restrict access, the ip variable will be used, but will
-# defer to the default ansible ip when unspecified.
-#
-# The ip variable is used for specific address binding, e.g. listen address
-# for etcd. This is use to help with environments like Vagrant or multi-nic
-# systems where one address should be preferred over another.
-# ip: 10.2.2.2
-#
-# The access_ip variable is used to define how other nodes should access
-# the node. This is used in flannel to allow other flannel nodes to see
-# this node for example. The access_ip is really useful AWS and Google
-# environments where the nodes are accessed remotely by the "public" ip,
-# but don't know about that address themselves.
-# access_ip: 1.1.1.1
-
-# Etcd access modes:
-# Enable multiaccess to configure clients to access all of the etcd members directly
-# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-# This may be the case if clients support and loadbalance multiple etcd servers natively.
-etcd_multiaccess: true
-
-# Assume there are no internal loadbalancers for apiservers exist and listen on
-# kube_apiserver_port (default 443)
-loadbalancer_apiserver_localhost: true
-
-# Choose network plugin (calico, weave or flannel)
-kube_network_plugin: flannel
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network total size (optional). This is the prefix of the
-# entire network. Must be unused in your environment.
-# kube_network_prefix: 18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# With calico it is possible to distributed routes with border routers of the datacenter.
-peer_with_router: false
-# Warning : enabling router peering will disable calico's default behavior ('node mesh').
-# The subnets of each nodes will be distributed by the datacenter router
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# Internal DNS configuration.
-# Kubernetes can create and mainatain its own DNS server to resolve service names
-# into appropriate IP addresses. It's highly advisable to run such DNS server,
-# as it greatly simplifies configuration of your applications - you can use
-# service names instead of magic environment variables.
-
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-#  - 8.8.8.8
-#  - 8.8.4.4
-
-dns_domain: "{{ cluster_name }}"
-
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-
-# There are some changes specific to the cloud providers
-# for instance we need to encapsulate packets with some network plugins
-# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
-# When openstack is used make sure to source in the openstack credentials
-# like you would do when using nova-client before starting the playbook.
-# When azure is used, you need to also set the following variables.
-# cloud_provider:
-
-# see docs/azure.md for details on how to get these values
-#azure_tenant_id:
-#azure_subscription_id:
-#azure_aad_client_id:
-#azure_aad_client_secret:
-#azure_resource_group:
-#azure_location:
-#azure_subnet_name:
-#azure_security_group_name:
-#azure_vnet_name:
-
-
-## Set these proxy values in order to update docker daemon to use proxies
-# http_proxy: ""
-# https_proxy: ""
-# no_proxy: ""
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# default packages to install within the cluster
-kpm_packages: []
-# - name: kube-system/grafana
@@ -0,0 +1 @@
+../../../../inventory/group_vars/all.yml
docs/ansible.md

@@ -8,20 +8,39 @@ The inventory is composed of 3 groups:

 * **kube-node** : list of kubernetes nodes where the pods will run.
 * **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
-  Note: if you want the server to act both as master and node the server must be defined on both groups _kube-master_ and _kube-node_
 * **etcd**: list of server to compose the etcd server. you should have at least 3 servers for failover purposes.

+Note: do not modify the children of _k8s-cluster_, like putting
+the _etcd_ group into the _k8s-cluster_, unless you are certain
+to do that and you have it fully contained in the latter:
+
+```
+k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
+```
+
+When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
+If you want it a standalone, make sure those groups do not intersect.
+If you want the server to act both as master and node, the server must be defined
+on both groups _kube-master_ and _kube-node_. If you want a standalone and
+unschedulable master, the server must be defined only in the _kube-master_ and
+not _kube-node_.
+
+There are also two special groups:
+
+* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
+* **bastion** : configure a bastion host if your nodes are not directly reachable
+
 Below is a complete inventory example:

 ```
 ## Configure 'ip' variable to bind kubernetes services on a
 ## different ip than the default iface
-node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
-node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
-node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
-node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
-node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
-node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
+node1 ansible_ssh_host=95.54.0.12 ip=10.3.0.1
+node2 ansible_ssh_host=95.54.0.13 ip=10.3.0.2
+node3 ansible_ssh_host=95.54.0.14 ip=10.3.0.3
+node4 ansible_ssh_host=95.54.0.15 ip=10.3.0.4
+node5 ansible_ssh_host=95.54.0.16 ip=10.3.0.5
+node6 ansible_ssh_host=95.54.0.17 ip=10.3.0.6

 [kube-master]
 node1
@@ -42,7 +61,6 @@ node6
 [k8s-cluster:children]
 kube-node
 kube-master
-etcd
 ```

 Group vars and overriding variables precedence
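The intersection rules above can be checked against a concrete inventory with stock ansible commands; a minimal sketch, assuming the inventory path used elsewhere in this compare:

```
# List effective members of each group to verify the (non-)intersections.
ansible -i inventory/inventory.ini kube-master --list-hosts
ansible -i inventory/inventory.ini etcd --list-hosts
# k8s-cluster is built from its children (kube-node + kube-master).
ansible -i inventory/inventory.ini k8s-cluster --list-hosts
```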
docs/getting-started.md

@@ -1,10 +1,10 @@
 Getting started
 ===============

-The easiest way to run the deployement is to use the **kargo-cli** tool.
+The easiest way to run the deployement is to use the **kargo-cli** tool.
 A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).

-Here is a simple example on AWS:
+Here is a simple example on AWS:

 * Create instances and generate the inventory

@@ -12,21 +12,45 @@ Here is a simple example on AWS:
 kargo aws --instances 3
 ```

-* Run the deployment
+* Run the deployment

 ```
 kargo deploy --aws -u centos -n calico
 ```

 Building your own inventory
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+---------------------------

+Ansible inventory can be stored in 3 formats: YAML, JSON, or inifile. There is
+an example inventory located
+[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
+
 You can use an
-[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_generator/inventory_generator.py)
+[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
 to create or modify an Ansible inventory. Currently, it is limited in
 functionality and is only use for making a basic Kargo cluster, but it does
-support creating large clusters.
+support creating large clusters. It now supports
+separated ETCD and Kubernetes master roles from node role if the size exceeds a
+certain threshold. Run inventory.py help for more information.
+
+Example inventory generator usage:
+
+```
+cp -r inventory my_inventory
+declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
+```
+
+Starting custom deployment
+--------------------------
+
+Once you have an inventory, you may want to customize deployment data vars
+and start the deployment:
+
+```
+# Edit my_inventory/groups_vars/*.yaml to override data vars
+ansible-playbook -i my_inventory/inventory.cfg cluster.yaml -b -v \
+  --private-key=~/.ssh/private_key
+```

 See more details in the [ansible guide](ansible.md).
docs/ha-mode.md

@@ -33,15 +33,20 @@ proxy. Kargo includes support for an nginx-based proxy that resides on each
 non-master Kubernetes node. This is referred to as localhost loadbalancing. It
 is less efficient than a dedicated load balancer because it creates extra
 health checks on the Kubernetes apiserver, but is more practical for scenarios
-where an external LB or virtual IP management is inconvenient.
+where an external LB or virtual IP management is inconvenient. This option is
+configured by the variable `loadbalancer_apiserver_localhost`. You may also
+define the port the local internal loadbalancer users by changing,
+`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
+It is also import to note that Kargo will only configure kubelet and kube-proxy
+on non-master nodes to use the local internal loadbalancer.

-This option is configured by the variable `loadbalancer_apiserver_localhost`.
-you will need to configure your own loadbalancer to achieve HA. Note that
-deploying a loadbalancer is up to a user and is not covered by ansible roles
-in Kargo. By default, it only configures a non-HA endpoint, which points to
-the `access_ip` or IP address of the first server node in the `kube-master`
-group. It can also configure clients to use endpoints for a given loadbalancer
-type. The following diagram shows how traffic to the apiserver is directed.
+If you choose to NOT use the local internal loadbalancer, you will need to configure
+your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
+a user and is not covered by ansible roles in Kargo. By default, it only configures
+a non-HA endpoint, which points to the `access_ip` or IP address of the first server
+node in the `kube-master` group. It can also configure clients to use endpoints
+for a given loadbalancer type. The following diagram shows how traffic to the
+apiserver is directed.

 

@@ -90,7 +95,7 @@ Access endpoints are evaluated automagically, as the following:

 | Endpoint type                | kube-master   | non-master          |
 |------------------------------|---------------|---------------------|
-| Local LB                     | http://lc:p   | https://lc:sp       |
+| Local LB                     | http://lc:p   | https://lc:nsp      |
 | External LB, no internal     | https://lb:lp | https://lb:lp       |
 | No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |

@@ -99,7 +104,9 @@ Where:
 * `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
 * `lc` - localhost;
 * `p` - insecure port, `kube_apiserver_insecure_port`
+* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
 * `sp` - secure port, `kube_apiserver_port`;
 * `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
 * `ip` - the node IP, defers to the ansible IP;
+* `aip` - `access_ip`, defers to the ip.
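A quick smoke test of which endpoint a node really uses; a sketch, assuming the defaults from this diff (`kube_apiserver_port: 443`, so the nginx proxy's `nsp` is also 443, and `kube_apiserver_insecure_port: 8080`):

```
# On a non-master node: the local nginx proxy should answer on the secure port.
curl -ks https://localhost:443/healthz

# On a master node: clients use the local insecure port (the http://lc:p row).
curl -s http://localhost:8080/healthz
```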
docs/large-deployments.md

@@ -27,5 +27,15 @@ For a large scaled deployments, consider the following configuration changes:
   end up with the 'm' skipped for docker as well. This is required as docker does not
   understand k8s units well.

+* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
+  from host/network interruption much quicker with calico-rr. Note that
+  calico-rr role must be on a host without kube-master or kube-node role (but
+  etcd role is okay).
+
+* Check out the
+  [Inventory](https://github.com/kubernetes-incubator/kargo/blob/master/docs/getting-started.md#building-your-own-inventory)
+  section of the Getting started guide for tips on creating a large scale
+  Ansible inventory.
+
 For example, when deploying 200 nodes, you may want to run ansible with
 ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
docs/roadmap.md

@@ -13,40 +13,41 @@ That would probably improve deployment speed and certs management [#553](https:/
 - **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)

 ### Provisionning and cloud providers
-- Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
-- On AWS autoscaling, multi AZ
-- On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
-- On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
-- **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
+- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
+- [ ] On AWS autoscaling, multi AZ
+- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
+- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
+- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
   (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
   https://github.com/kubernetes/kubernetes/issues/18112)

 ### Tests
-- Run kubernetes e2e tests
-- migrate to jenkins
+- [x] Run kubernetes e2e tests
+- [x] migrate to jenkins
   (a test is currently a deployment on a 3 node cluste, testing k8s api, ping between 2 pods)
-- Full tests on GCE per day (All OS's, all network plugins)
-- trigger a single test per pull request
-- single test with the Ansible version n-1 per day
-- Test idempotency on on single OS but for all network plugins/container engines
-- single test on AWS per day
-- test different achitectures :
+- [x] Full tests on GCE per day (All OS's, all network plugins)
+- [x] trigger a single test per pull request
+- [ ] ~~single test with the Ansible version n-1 per day~~
+- [x] Test idempotency on on single OS but for all network plugins/container engines
+- [ ] single test on AWS per day
+- [x] test different achitectures :
   - 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
   - 5 instances, 3 are etcd and nodes, 2 are masters only
   - 7 instances, 3 etcd only, 2 masters, 2 nodes
-- test scale up cluster: +1 etcd, +1 master, +1 node
+- [ ] test scale up cluster: +1 etcd, +1 master, +1 node

 ### Lifecycle
-- Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
-- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
-- Drain worker node when shutting down/deleting an instance
+- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
+- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
+- [ ] Drain worker node when shutting down/deleting an instance
+- [ ] Upgrade granularity: select components to upgrade and skip others

 ### Networking
-- romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
-- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
-- Opencontrail
-- Canal
-- Cloud Provider native networking (instead of our network plugins)
+- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
+- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
+- [ ] Opencontrail
+- [x] Canal
+- [x] Cloud Provider native networking (instead of our network plugins)

 ### High availability
 - (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
inventory/group_vars/all.yml

@@ -55,7 +55,7 @@ kube_users:
     pass: "{{kube_api_pwd}}"
     role: admin
   root:
-    pass: "changeme"
+    pass: "{{kube_api_pwd}}"
     role: admin

 # Kubernetes cluster name, also will be used as DNS domain
@@ -127,6 +127,8 @@ peer_with_router: false
 kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
 kube_apiserver_port: 443 # (https)
 kube_apiserver_insecure_port: 8080 # (http)
+# local loadbalancer should use this port instead - default to kube_apiserver_port
+nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"

 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
inventory/inventory.example

@@ -7,7 +7,7 @@
 # node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
 # node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6

-# ## configure a bastion host if your nodes are not publicly reachable
+# ## configure a bastion host if your nodes are not directly reachable
 # bastion ansible_ssh_host=x.x.x.x

 # [kube-master]
requirements.txt

@@ -1,2 +1,2 @@
-ansible
+ansible>=2.2.1
 netaddr
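A sketch of verifying the new floor locally; the pin mirrors what CI now installs (`ansible==2.2.1.0`):

```
pip install -r requirements.txt
ansible --version   # should report 2.2.1 or newer
```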
@@ -4,7 +4,7 @@

 {% for h in groups['all'] %}
 {% if h != 'bastion' %}
-{% if vars.update({'hosts': vars['hosts'] + ' ' + hostvars[h]['ansible_ssh_host']}) %}{% endif %}
+{% if vars.update({'hosts': vars['hosts'] + ' ' + (hostvars[h].get('ansible_ssh_host') or hostvars[h]['ansible_host'])}) %}{% endif %}
 {% endif %}
 {% endfor %}

@@ -18,4 +18,4 @@ Host {{ bastion_ip }}
 Host {{ vars['hosts'] }}
   ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
   StrictHostKeyChecking no
-{% endif %}
+{% endif %}
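Once rendered, the generated config (the /ssh-bastion.conf path newly ignored in .gitignore above) can be handed to ssh or to ansible; a sketch with a hypothetical node address:

```
# Reach a node through the bastion using the generated per-host ProxyCommand.
ssh -F ssh-bastion.conf user@10.3.0.1

# Point ansible's ssh transport at the same file for playbook runs.
ANSIBLE_SSH_ARGS='-F ssh-bastion.conf' \
  ansible-playbook -i inventory/inventory.ini cluster.yml
```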
@@ -11,6 +11,9 @@
 #nameservers:
 #  - 127.0.0.1

+dns_forward_max: 150
+cache_size: 1000
+
 # Versions
 dnsmasq_version: 2.72
@@ -27,7 +27,8 @@ log-queries
 {% endif %}
 bogus-priv
 no-negcache
-cache-size=1000
+cache-size={{ cache_size }}
+dns-forward-max={{ dns_forward_max }}
 max-cache-ttl=10
 max-ttl=20
 log-facility=-
@@ -20,8 +20,8 @@ spec:
         - dnsmasq
         args:
           - -k
-          - "-7"
-          - /etc/dnsmasq.d
+          - -C
+          - /etc/dnsmasq.d/01-kube-dns.conf
         securityContext:
           capabilities:
             add:
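Both flags are standard dnsmasq options, so the change is easy to reproduce outside the pod; a sketch:

```
# Old arguments: -7 (--conf-dir) pulls in every file under the directory.
dnsmasq -k -7 /etc/dnsmasq.d

# New arguments: -C (--conf-file) loads exactly one file, so the container
# no longer depends on whatever else lands in /etc/dnsmasq.d.
dnsmasq -k -C /etc/dnsmasq.d/01-kube-dns.conf
```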
@@ -65,10 +65,11 @@
     retries: 4
     delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_package_info.pkgs }}"
+  notify: restart docker
   when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_package_info.pkgs|length > 0)

 - name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
-  shell: docker version -f "{{ '{{' }}.Client.Version{{ '}}' }}"
+  raw: docker version -f "{{ '{{' }}.Client.Version{{ '}}' }}"
   register: docker_version
   failed_when: docker_version.stdout|version_compare('1.12', '<')
   changed_when: false
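The doubled braces exist only to stop Jinja2 from consuming the Go template; after rendering, the host runs a plain docker command. A sketch of the rendered check:

```
# What the raw task executes once "{{ '{{' }}" renders back to "{{":
docker version -f '{{.Client.Version}}'
# Prints e.g. 1.12.6; the task fails the play when the version_compare
# filter says the result is < 1.12.
```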
@@ -4,7 +4,8 @@ docker_kernel_min_version: '3.10'
 docker_versioned_pkg:
   'latest': docker-engine
   '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
-  '1.12': docker-engine=1.12.5-0~debian-{{ ansible_distribution_release|lower }}
+  '1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
+  '1.13': docker-engine=1.13.0-0~debian-{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt
@@ -1,9 +1,17 @@
 docker_kernel_min_version: '0'

+# https://yum.dockerproject.org/repo/main/centos/7/Packages/
+# or do 'yum --showduplicates list docker-engine'
+docker_versioned_pkg:
+  'latest': docker-engine
+  '1.11': docker-engine-1.11.2-1.el7.centos
+  '1.12': docker-engine-1.12.6-1.el7.centos
+  '1.13': docker-engine-1.13.0-1.el7.centos
+
 docker_package_info:
   pkg_mgr: yum
   pkgs:
-    - name: docker-engine
+    - name: "{{ docker_versioned_pkg[docker_version | string] }}"

 docker_repo_key_info:
   pkg_key: ''
@@ -6,7 +6,8 @@ docker_kernel_min_version: '3.10'
 docker_versioned_pkg:
   'latest': docker-engine
   '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
-  '1.12': docker-engine=1.12.5-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '1.13': docker-engine=1.13.0-0~ubuntu-{{ ansible_distribution_release|lower }}

 docker_package_info:
   pkg_mgr: apt
@@ -23,16 +23,14 @@ etcd_version: v3.0.6
 # after migration to container download
 calico_version: "v1.0.0"
 calico_cni_version: "v1.5.5"
-weave_version: v1.6.1
+weave_version: 1.8.2
 flannel_version: v0.6.2
 pod_infra_version: 3.0

 # Download URL's
 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
-weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"

 # Checksums
-weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"

 # Containers
@@ -66,6 +64,10 @@ netcheck_kubectl_tag: v0.18.0-120-gaeb4ac55ad12b1-dirty
 netcheck_agent_img_repo: "quay.io/l23network/mcp-netchecker-agent"
 netcheck_server_img_repo: "quay.io/l23network/mcp-netchecker-server"
 netcheck_kubectl_img_repo: "gcr.io/google_containers/kubectl"
+weave_kube_image_repo: "weaveworks/weave-kube"
+weave_kube_image_tag: "{{ weave_version }}"
+weave_npc_image_repo: "weaveworks/weave-npc"
+weave_npc_image_tag: "{{ weave_version }}"

 nginx_image_repo: nginx
 nginx_image_tag: 1.11.4-alpine
@@ -100,15 +102,6 @@ downloads:
     tag: "{{ netcheck_kubectl_tag }}"
     sha256: "{{ netcheck_kubectl_digest_checksum|default(None) }}"
     enabled: "{{ deploy_netchecker|bool }}"
-  weave:
-    dest: weave/bin/weave
-    version: "{{weave_version}}"
-    source_url: "{{weave_download_url}}"
-    url: "{{weave_download_url}}"
-    sha256: "{{ weave_checksum }}"
-    owner: "root"
-    mode: "0755"
-    enabled: "{{ kube_network_plugin == 'weave' }}"
   etcd:
     version: "{{etcd_version}}"
     dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
@@ -163,6 +156,18 @@ downloads:
     tag: "{{ calico_rr_image_tag }}"
     sha256: "{{ calico_rr_digest_checksum|default(None) }}"
     enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr}} and kube_network_plugin == 'calico'"
+  weave_kube:
+    container: true
+    repo: "{{ weave_kube_image_repo }}"
+    tag: "{{ weave_kube_image_tag }}"
+    sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+    enabled: "{{ kube_network_plugin == 'weave' }}"
+  weave_npc:
+    container: true
+    repo: "{{ weave_npc_image_repo }}"
+    tag: "{{ weave_npc_image_tag }}"
+    sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+    enabled: "{{ kube_network_plugin == 'weave' }}"
   pod_infra:
     container: true
     repo: "{{ pod_infra_image_repo }}"
@@ -8,7 +8,8 @@
     {%- if pull_by_digest|bool %}{{download.repo}}@sha256:{{download.sha256}}{%- else -%}{{download.repo}}:{{download.tag}}{%- endif -%}

 - name: Register docker images info
-  shell: "{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f {% raw %}'{{.RepoTags}},{{.RepoDigests}}'{% endraw %}"
+  raw: >-
+    {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}"
   register: docker_images_raw
   failed_when: false
   when: not download_always_pull|bool
@@ -71,14 +71,15 @@ fi
 # ETCD member
 if [ -n "$MASTERS" ]; then
   for host in $MASTERS; do
+    cn="${host%%.*}"
     # Member key
     openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1
-    openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${host}" -config ${CONFIG} > /dev/null 2>&1
+    openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1
     openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1

     # Admin key
     openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
-    openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${host}" > /dev/null 2>&1
+    openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1
     openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
   done
 fi
@@ -86,8 +87,9 @@ fi
 # Node keys
 if [ -n "$HOSTS" ]; then
   for host in $HOSTS; do
+    cn="${host%%.*}"
     openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
-    openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${host}" > /dev/null 2>&1
+    openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1
    openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
   done
 fi
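The new `cn` variable uses POSIX longest-suffix deletion so certificate CNs stay stable when inventory hostnames are fully qualified; a quick illustration with hypothetical hostnames:

```
host=node1.example.com
cn="${host%%.*}"        # strip the longest suffix matching ".*"
echo "$cn"              # -> node1

host=node1
echo "${host%%.*}"      # no dot, nothing stripped -> node1
```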
@@ -1,12 +1,4 @@
 ---

-- name: Gen_certs | create etcd script dir
-  file:
-    path: "{{ etcd_script_dir }}"
-    state: directory
-    owner: root
-  when: inventory_hostname == groups['etcd'][0]
-
 - name: Gen_certs | create etcd cert dir
   file:
     path={{ etcd_cert_dir }}
@@ -15,6 +7,24 @@
     owner=root
     recurse=yes

+- name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
+  file:
+    path: "{{ etcd_script_dir }}"
+    state: directory
+    owner: root
+  run_once: yes
+  delegate_to: "{{groups['etcd'][0]}}"
+
+- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
+  file:
+    path={{ etcd_cert_dir }}
+    group={{ etcd_cert_group }}
+    state=directory
+    owner=root
+    recurse=yes
+  run_once: yes
+  delegate_to: "{{groups['etcd'][0]}}"
+
 - name: Gen_certs | write openssl config
   template:
     src: "openssl.conf.j2"
@@ -15,22 +15,22 @@
 - include: refresh_config.yml
   when: is_etcd_master

-- name: Ensure etcd is running
-  service:
-    name: etcd
-    state: started
-    enabled: yes
-  when: is_etcd_master
-
 - name: Restart etcd if binary or certs changed
   command: /bin/true
   notify: restart etcd
   when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
     or etcd_secret_changed|default(false)

 # Reload systemd before starting service
 # reload-systemd
 - meta: flush_handlers

+- name: Ensure etcd is running
+  service:
+    name: etcd
+    state: started
+    enabled: yes
+  when: is_etcd_master
+
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
@@ -5,6 +5,7 @@
   until: result.status == 200
   retries: 10
   delay: 6
+  when: inventory_hostname == groups['kube-master'][0]

 - name: Kubernetes Apps | Lay Down KubeDNS Template
   template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
@@ -1,305 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
  - Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
  name:
    required: false
    default: null
    description:
      - The name associated with resource
  filename:
    required: false
    default: null
    description:
      - The path and filename of the resource(s) definition file.
  kubectl:
    required: false
    default: null
    description:
      - The path to the kubectl bin
  namespace:
    required: false
    default: null
    description:
      - The namespace associated with the resource(s)
  resource:
    required: false
    default: null
    description:
      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
  label:
    required: false
    default: null
    description:
      - The labels used to filter specific resources.
  server:
    required: false
    default: null
    description:
      - The url for the API server that commands are executed against.
  force:
    required: false
    default: false
    description:
      - A flag to indicate to force delete, replace, or stop.
  all:
    required: false
    default: false
    description:
      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
  log_level:
    required: false
    default: 0
    description:
      - Indicates the level of verbosity of logging by kubectl.
  state:
    required: false
    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
    default: present
    description:
      - present handles checking existence or creating if definition file provided,
        absent handles deleting resource(s) based on other options,
        latest handles creating or updating based on existence,
        reloaded handles updating resource(s) definition using definition file,
        stopped handles stopping resource(s) based on other options.
requirements:
  - kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""

EXAMPLES = """
- name: test nginx is present
  kube: name=nginx resource=rc state=present

- name: test nginx is stopped
  kube: name=nginx resource=rc state=stopped

- name: test nginx is absent
  kube: name=nginx resource=rc state=absent

- name: test nginx is present
  kube: filename=/tmp/nginx.yml
"""


class KubeManager(object):

    def __init__(self, module):

        self.module = module

        self.kubectl = module.params.get('kubectl')
        if self.kubectl is None:
            self.kubectl = module.get_bin_path('kubectl', True)
        self.base_cmd = [self.kubectl]

        if module.params.get('server'):
            self.base_cmd.append('--server=' + module.params.get('server'))

        if module.params.get('log_level'):
            self.base_cmd.append('--v=' + str(module.params.get('log_level')))

        if module.params.get('namespace'):
            self.base_cmd.append('--namespace=' + module.params.get('namespace'))

        self.all = module.params.get('all')
        self.force = module.params.get('force')
        self.name = module.params.get('name')
        self.filename = module.params.get('filename')
        self.resource = module.params.get('resource')
        self.label = module.params.get('label')

    def _execute(self, cmd):
        args = self.base_cmd + cmd
        try:
            rc, out, err = self.module.run_command(args)
            if rc != 0:
                self.module.fail_json(
                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
        except Exception as exc:
            self.module.fail_json(
                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
        return out.splitlines()

    def _execute_nofail(self, cmd):
        args = self.base_cmd + cmd
        rc, out, err = self.module.run_command(args)
        if rc != 0:
            return None
        return out.splitlines()

    def create(self, check=True):
        if check and self.exists():
            return []

        cmd = ['create']

        if not self.filename:
            self.module.fail_json(msg='filename required to create')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def replace(self):

        if not self.force and not self.exists():
            return []

        cmd = ['replace']

        if self.force:
            cmd.append('--force')

        if not self.filename:
            self.module.fail_json(msg='filename required to reload')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def delete(self):

        if not self.force and not self.exists():
            return []

        cmd = ['delete']

        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to delete without filename')

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')

        return self._execute(cmd)

    def exists(self):
        cmd = ['get']

        if not self.resource:
            return False

        cmd.append(self.resource)

        if self.name:
            cmd.append(self.name)

        cmd.append('--no-headers')

        if self.label:
            cmd.append('--selector=' + self.label)

        if self.all:
            cmd.append('--all-namespaces')

        result = self._execute_nofail(cmd)
        if not result:
            return False
        return True

    def stop(self):

        if not self.force and not self.exists():
            return []

        cmd = ['stop']

        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to stop without filename')

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')

        return self._execute(cmd)


def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(),
            filename=dict(),
            namespace=dict(),
            resource=dict(),
            label=dict(),
            server=dict(),
            kubectl=dict(),
            force=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
            )
        )

    changed = False

    manager = KubeManager(module)
    state = module.params.get('state')

    if state == 'present':
        result = manager.create()

    elif state == 'absent':
        result = manager.delete()

    elif state == 'reloaded':
        result = manager.replace()

    elif state == 'stopped':
        result = manager.stop()

    elif state == 'latest':
        if manager.exists():
            manager.force = True
            result = manager.replace()
        else:
            result = manager.create(check=False)

    else:
        module.fail_json(msg='Unrecognized state %s.' % state)

    if result:
        changed = True
    module.exit_json(changed=changed,
                     msg='success: %s' % (' '.join(result))
                     )


from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
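The module's `latest` state above is the one the network-plugin tasks later in this diff rely on: if the resource already exists it is force-replaced, otherwise created. A minimal task using the module (resource names and paths are illustrative, not from this diff):

    - name: Apply a manifest, replacing the resource if it already exists
      kube:
        name: "kubedns"
        namespace: "kube-system"
        resource: "rc"
        filename: "/etc/kubernetes/kubedns-rc.yml"
        state: latest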
@@ -3,3 +3,6 @@ dependencies:
  - role: kubernetes-apps/network_plugin/canal
    when: kube_network_plugin == 'canal'
    tags: canal
  - role: kubernetes-apps/network_plugin/weave
    when: kube_network_plugin == 'weave'
    tags: weave

roles/kubernetes-apps/network_plugin/weave/tasks/main.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
- name: Weave | Start Resources
  run_once: true
  kube:
    name: "weave-net"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/weave-net.yml"
    resource: "ds"
    namespace: "{{system_namespace}}"
    state: "{{ item | ternary('latest','present') }}"
  with_items: "{{ weave_manifest.changed }}"
  delegate_to: "{{groups['kube-master'][0]}}"

- name: "Weave | wait for weave to become available"
  uri:
    url: http://127.0.0.1:6784/status
    return_content: yes
  run_once: true
  register: weave_status
  retries: 10
  delay: "{{ retry_stagger | random + 3 }}"
  until: "{{ weave_status.status == 200 and
         'Status: ready' in weave_status.content }}"
  delegate_to: "{{groups['kube-master'][0]}}"
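The `state` line above leans on Ansible's `ternary` filter: `with_items` iterates over the registered manifest's boolean `changed` flag, so `item` is a boolean and the filter maps it to a kube-module state. The filter in isolation:

    {{ true  | ternary('latest', 'present') }}   {# -> latest:  re-apply the changed manifest #}
    {{ false | ternary('latest', 'present') }}   {# -> present: only create if missing #}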
@@ -11,21 +11,19 @@
  changed_when: false
  tags: [hyperkube, kubectl, upgrade]

- name: Gather kubectl bash completion
  command: "{{ bin_dir }}/kubectl completion bash"
  no_log: true
  register: kubectl_bash_completion
- name: Install kubectl bash completion
  shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
  #no_log: true
  when: ansible_os_family in ["Debian","RedHat"]
  tags: kubectl

- name: Write kubectl bash completion
  copy:
    content: "{{ kubectl_bash_completion.stdout }}"
    dest: /etc/bash_completion.d/kubectl.sh
- name: Set kubectl bash completion file
  file:
    path: /etc/bash_completion.d/kubectl.sh
    owner: root
    group: root
    mode: 0755
  when: ansible_os_family in ["Debian","RedHat"] and kubectl_bash_completion.changed
  when: ansible_os_family in ["Debian","RedHat"]
  tags: [kubectl, upgrade]

- name: Write kube-apiserver manifest

@@ -27,3 +27,5 @@ nginx_cpu_requests: 50m

nginx_image_repo: nginx
nginx_image_tag: 1.11.4-alpine

etcd_config_dir: /etc/ssl/etcd

@@ -1,16 +1,23 @@
#!/bin/bash
{{ docker_bin_dir }}/docker run --privileged \
  --net=host --pid=host --name=kubelet --restart=on-failure:5 \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v {{kube_config_dir}}:{{kube_config_dir}} \
  -v /sys:/sys \
  -v /dev:/dev \
  -v {{ docker_daemon_graph }}:/var/lib/docker \
  -v /var/run:/var/run \
  -v /var/lib/kubelet:/var/lib/kubelet \
  --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  nsenter --target=1 --mount --wd=. -- \
  ./hyperkube kubelet \
  $@
{{ docker_bin_dir }}/docker run \
  --net=host \
  --pid=host \
  --privileged \
  --name=kubelet \
  --restart=on-failure:5 \
  --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
  --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v /etc/ssl:/etc/ssl:ro \
{% for dir in ssl_ca_dirs -%}
  -v {{ dir }}:{{ dir }}:ro \
{% endfor -%}
  -v /sys:/sys:ro \
  -v {{ docker_daemon_graph }}:/var/lib/docker:rw \
  -v /var/lib/kubelet:/var/lib/kubelet:shared \
  -v /var/run:/var/run:rw \
  -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  ./hyperkube kubelet \
  $@

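The new `:shared` propagation flag on /var/lib/kubelet is what lets mounts created inside the kubelet container (pod volumes) propagate back to the host. This assumes the host path is already a shared mount; where it is not, it is typically prepared with something like the following (a sketch, not part of this diff):

    mount --bind /var/lib/kubelet /var/lib/kubelet   # make the path its own mount point
    mount --make-shared /var/lib/kubelet             # enable shared propagation on it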
@@ -20,22 +20,28 @@ ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
  --volume var-log,kind=host,source=/var/log \
  --volume dns,kind=host,source=/etc/resolv.conf \
  --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
  --volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
  --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
  --volume etcd-ssl,kind=host,source={{ etcd_config_dir }},readOnly=true \
  --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
  --volume run,kind=host,source=/run,readOnly=false \
  --volume usr-share-certs,kind=host,source=/usr/share/ca-certificates,readOnly=true \
  --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
  --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false \
  --volume run,kind=host,source=/run,readOnly=false \
  --mount volume=var-log,target=/var/log \
  --volume var-log,kind=host,source=/var/log \
  --mount volume=dns,target=/etc/resolv.conf \
  --mount volume=etc-cni,target=/etc/cni \
  --mount volume=etc-kubernetes,target={{ kube_config_dir }} \
  --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
  --mount volume=etcd-ssl,target={{ etcd_config_dir }} \
  --mount volume=opt-cni,target=/opt/cni \
  --mount volume=run,target=/run \
  --mount volume=usr-share-certs,target=/usr/share/ca-certificates \
  --mount volume=var-lib-docker,target=/var/lib/docker \
  --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
  --mount volume=run,target=/run \
  --mount volume=var-log,target=/var/log \
  --stage1-from-dir=stage1-fly.aci \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
  --uuid-file-save=/var/run/kubelet.uuid \

@@ -16,7 +16,7 @@ stream {
    }

    server {
        listen {{ kube_apiserver_port }};
        listen 127.0.0.1:{{ nginx_kube_apiserver_port }};
        proxy_pass kube_apiserver;
        proxy_timeout 10m;
        proxy_connect_timeout 1s;

@@ -8,8 +8,8 @@ clusters:
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/node.pem
    client-key: {{ kube_cert_dir }}/node-key.pem
    client-certificate: {{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
    client-key: {{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
contexts:
- context:
    cluster: local

@@ -24,7 +24,7 @@ openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"

# All clients access each node individually, instead of using a load balancer.
etcd_multiaccess: true

roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
---

# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x
# or when changing resolvconf_mode)

- name: Remove kargo specific config from dhclient config
  blockinfile:
    dest: "{{dhclientconffile}}"
    state: absent
    backup: yes
    follow: yes
    marker: "# Ansible entries {mark}"
  when: dhclientconffile is defined
  notify: Preinstall | restart network

- name: Remove kargo specific dhclient hook
  file: path="{{ dhclienthookfile }}" state=absent
  when: dhclienthookfile is defined
  notify: Preinstall | restart network

# We need to make sure the network is restarted early enough so that docker can later pick up the correct system
# nameservers and search domains
- meta: flush_handlers
@@ -13,6 +13,7 @@
    follow: yes
    marker: "# Ansible entries {mark}"
  notify: Preinstall | restart network
  when: dhclientconffile is defined

- name: Configure dhclient hooks for resolv.conf (non-RH)
  template:

@@ -76,17 +76,6 @@
  when: cloud_provider is defined and cloud_provider == 'azure'
  tags: [cloud-provider, azure, facts]

- name: Enable ip forwarding
  lineinfile:
    dest: /etc/sysctl.d/99-sysctl.conf
    regexp: '^net.ipv4.ip_forward='
    line: 'net.ipv4.ip_forward=1'
    state: present
    create: yes
    backup: yes
    validate: 'sysctl -f %s'
  tags: bootstrap-os

- name: Create cni directories
  file:
    path: "{{ item }}"
@@ -134,6 +123,13 @@
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: bootstrap-os

# Todo : selinux configuration
- name: Set selinux policy to permissive
  selinux: policy=targeted state=permissive
  when: ansible_os_family == "RedHat"
  changed_when: False
  tags: bootstrap-os

- name: Disable IPv6 DNS lookup
  lineinfile:
    dest: /etc/gai.conf
@@ -143,11 +139,15 @@
  when: disable_ipv6_dns and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: bootstrap-os

# Todo : selinux configuration
- name: Set selinux policy to permissive
  selinux: policy=targeted state=permissive
  when: ansible_os_family == "RedHat"
  changed_when: False
- name: Enable ip forwarding
  lineinfile:
    dest: /etc/sysctl.d/99-sysctl.conf
    regexp: '^net.ipv4.ip_forward='
    line: 'net.ipv4.ip_forward=1'
    state: present
    create: yes
    backup: yes
    validate: 'sysctl -f %s'
  tags: bootstrap-os

- name: Write openstack cloud-config
@@ -156,7 +156,7 @@
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: cloud_provider is defined and cloud_provider == "openstack"
  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "openstack"
  tags: [cloud-provider, openstack]

- name: Write azure cloud-config
@@ -165,7 +165,7 @@
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when: cloud_provider is defined and cloud_provider == "azure"
  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "azure"
  tags: [cloud-provider, azure]

- include: etchosts.yml
@@ -175,6 +175,14 @@
  when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
  tags: [bootstrap-os, resolvconf]

- include: dhclient-hooks.yml
  when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: [bootstrap-os, resolvconf]

- include: dhclient-hooks-undo.yml
  when: dns_mode != 'none' and resolvconf_mode != 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: [bootstrap-os, resolvconf]

- name: Check if we are running inside an Azure VM
  stat: path=/var/lib/waagent/
  register: azure_check

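The relocated "Enable ip forwarding" task still writes net.ipv4.ip_forward=1 into /etc/sysctl.d/99-sysctl.conf, validating the file before installing it. A quick manual check that the setting took effect on a host:

    sysctl net.ipv4.ip_forward          # expect: net.ipv4.ip_forward = 1
    cat /proc/sys/net/ipv4/ip_forward   # expect: 1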
@@ -58,7 +58,3 @@
    mode: 0644
  notify: Preinstall | update resolvconf for Container Linux by CoreOS
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- include: dhclient-hooks.yml
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
  tags: [bootstrap-os, resolvconf]

@@ -10,7 +10,7 @@
- set_fact:
    kube_apiserver_endpoint: |-
      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
      https://localhost:{{ kube_apiserver_port }}
      https://localhost:{{ nginx_kube_apiserver_port }}
      {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
      http://127.0.0.1:{{ kube_apiserver_insecure_port }}
      {%- else -%}

@@ -17,16 +17,16 @@
  default_resolver: >-
    {%- if cloud_provider is defined and cloud_provider == 'gce' -%}169.254.169.254{%- else -%}8.8.8.8{%- endif -%}

- name: check kubelet
- name: check if kubelet is configured
  stat:
    path: "{{ bin_dir }}/kubelet"
  register: kubelet
    path: "{{ kube_config_dir }}/kubelet.env"
  register: kubelet_configured
  changed_when: false

- name: check if early DNS configuration stage
  set_fact:
    dns_early: >-
      {%- if kubelet.stat.exists -%}false{%- else -%}true{%- endif -%}
      {%- if kubelet_configured.stat.exists -%}false{%- else -%}true{%- endif -%}

- name: target resolv.conf files
  set_fact:
@@ -41,15 +41,31 @@
  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
  when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: target dhclient conf/hook files for Red Hat family
- name: check if /etc/dhclient.conf exists
  stat: path=/etc/dhclient.conf
  register: dhclient_stat

- name: target dhclient conf file for /etc/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhclient.conf
  when: dhclient_stat.stat.exists

- name: check if /etc/dhcp/dhclient.conf exists
  stat: path=/etc/dhcp/dhclient.conf
  register: dhcp_dhclient_stat

- name: target dhclient conf file for /etc/dhcp/dhclient.conf
  set_fact:
    dhclientconffile: /etc/dhcp/dhclient.conf
  when: dhcp_dhclient_stat.stat.exists

- name: target dhclient hook file for Red Hat family
  set_fact:
    dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh
  when: ansible_os_family == "RedHat"

- name: target dhclient conf/hook files for Debian family
- name: target dhclient hook file for Debian family
  set_fact:
    dhclientconffile: /etc/dhcp/dhclient.conf
    dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
  when: ansible_os_family == "Debian"

@@ -27,8 +27,11 @@ Usage : $(basename $0) -f <config> [-d <ssldir>]
  -f | --config : Openssl configuration file
  -d | --ssldir : Directory where the certificates will be installed

ex :
  $(basename $0) -f openssl.conf -d /srv/ssl
Environment variables MASTERS and HOSTS should be set to generate keys
for each host.

ex :
  MASTERS=node1 HOSTS="node1 node2" $(basename $0) -f openssl.conf -d /srv/ssl
EOF
}

@@ -61,21 +64,42 @@ cd "${tmpdir}"
mkdir -p "${SSLDIR}"

# Root CA
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
if [ -e "$SSLDIR/ca-key.pem" ]; then
    # Reuse existing CA
    cp $SSLDIR/{ca.pem,ca-key.pem} .
else
    openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
    openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
fi

# Apiserver
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
cat ca.pem >> apiserver.pem
if [ ! -e "$SSLDIR/ca-key.pem" ]; then
    # kube-apiserver key
    openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
    openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
    openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
    cat ca.pem >> apiserver.pem
fi

if [ -n "$MASTERS" ]; then
    for host in $MASTERS; do
        cn="${host%%.*}"
        # admin key
        openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
        openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}" > /dev/null 2>&1
        openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 365 > /dev/null 2>&1
    done
fi

# Nodes and Admin
for i in node admin; do
    openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
    openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
    openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
done
if [ -n "$HOSTS" ]; then
    for host in $HOSTS; do
        cn="${host%%.*}"
        # node key
        openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
        openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1
        openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 365 > /dev/null 2>&1
    done
fi

# Install certs
mv *.pem ${SSLDIR}/

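The `cn="${host%%.*}"` lines above trim everything from the first dot onward, so certificate CNs stay short even when the inventory lists FQDNs. The expansion in isolation (hostname illustrative):

    host="node1.example.com"
    echo "${host%%.*}"   # prints: node1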
@@ -1,36 +1,49 @@
---
- name: "Check_certs | check if the certs have already been generated on first master"
  stat:
    path: "{{ kube_cert_dir }}/ca.pem"
    path: "{{ kube_cert_dir }}/{{ item }}"
  delegate_to: "{{groups['kube-master'][0]}}"
  register: kubecert_master
  run_once: true
  with_items: >-
    ['ca.pem',
    {% for host in groups['k8s-cluster'] %}
    'node-{{ host }}-key.pem'
    {% if not loop.last %}{{','}}{% endif %}
    {% endfor %}]

- name: "Check_certs | Set default value for 'sync_certs' and 'gen_certs' to false"
- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
  set_fact:
    sync_certs: false
    gen_certs: false
    secret_changed: false

- name: "Check_certs | Set 'sync_certs' and 'gen_certs' to true"
- name: "Check_certs | Set 'gen_certs' to true"
  set_fact:
    gen_certs: true
  when: not kubecert_master.stat.exists
  when: "not {{ item.stat.exists }}"
  run_once: true
  with_items: "{{ kubecert_master.results }}"

- name: "Check certs | check if a cert already exists"
  stat:
    path: "{{ kube_cert_dir }}/ca.pem"
    path: "{{ kube_cert_dir }}/{{ item }}"
  register: kubecert
  with_items:
    - ca.pem
    - node-{{ inventory_hostname }}-key.pem

- name: "Check_certs | Set 'sync_certs' to true"
  set_fact:
    sync_certs: true
  when: >-
    {%- set certs = {'sync': False} -%}
    {%- for server in play_hosts
        if (not hostvars[server].kubecert.stat.exists|default(False)) or
        (hostvars[server].kubecert.stat.checksum|default('') != kubecert_master.stat.checksum|default('')) -%}
      {%- set _ = certs.update({'sync': True}) -%}
    {%- for host in groups['k8s-cluster'] %}
      {% if host == inventory_hostname %}
        {% if (not kubecert.results[0].stat.exists|default(False)) or
            (kubecert.results[1].stat.checksum|default('') != kubecert_master.results[loop.index].stat.checksum|default('')) -%}
          {%- set _ = certs.update({'sync': True}) -%}
        {% endif %}
      {% endif %}
    {%- endfor -%}
    {{ certs.sync }}
  run_once: true

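The `when:` expression above uses a common Jinja workaround: because plain `set` variables do not survive loop scope, a mutable dict is updated inside the loop and read afterwards to produce the final boolean. The pattern reduced to its core (values illustrative):

    {%- set certs = {'sync': False} -%}
    {%- for f in ['a', 'b'] if f == 'b' -%}
      {%- set _ = certs.update({'sync': True}) -%}
    {%- endfor -%}
    {{ certs.sync }}   {# renders True #}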
@@ -1,4 +1,24 @@
---
- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
  file:
    path: "{{ kube_config_dir }}"
    state: directory
    owner: kube
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
  when: gen_certs|default(false)

- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
  file:
    path: "{{ kube_script_dir }}"
    state: directory
    owner: kube
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  tags: [k8s-secrets, bootstrap-os]
  when: gen_certs|default(false)

- name: Gen_certs | write openssl config
  template:
    src: "openssl.conf.j2"

@@ -18,42 +38,103 @@

- name: Gen_certs | run cert generation script
  command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl.conf -d {{ kube_cert_dir }}"
  environment:
    - MASTERS: "{% for m in groups['kube-master'] %}
          {% if hostvars[m].sync_certs|default(true) %}
          {{ m }}
          {% endif %}
        {% endfor %}"
    - HOSTS: "{% for h in groups['k8s-cluster'] %}
          {% if hostvars[h].sync_certs|default(true) %}
          {{ h }}
          {% endif %}
        {% endfor %}"
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  when: gen_certs|default(false)
  notify: set secret_changed

- set_fact:
    master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
    node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
    all_master_certs: "['ca-key.pem',
      {% for node in groups['kube-master'] %}
      'admin-{{ node }}.pem',
      'admin-{{ node }}-key.pem',
      'apiserver.pem',
      'apiserver-key.pem',
      {% endfor %}]"
    my_master_certs: ['ca-key.pem',
      'admin-{{ inventory_hostname }}.pem',
      'admin-{{ inventory_hostname }}-key.pem',
      'apiserver.pem',
      'apiserver-key.pem'
    ]
    all_node_certs: "['ca.pem',
      {% for node in groups['k8s-cluster'] %}
      'node-{{ node }}.pem',
      'node-{{ node }}-key.pem',
      {% endfor %}]"
    my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
  tags: facts

- name: Gen_certs | Gather master certs
  shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
  register: master_cert_data
  delegate_to: "{{groups['kube-master'][0]}}"
  run_once: true
  when: sync_certs|default(false)
  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | Gather node certs
  shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
  register: node_cert_data
  delegate_to: "{{groups['kube-master'][0]}}"
  run_once: true
  when: sync_certs|default(false)
  when: inventory_hostname in groups['kube-node'] and
        sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | Copy certs on masters
  shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
#char limit when using shell command

#FIXME(mattymo): Use tempfile module in ansible 2.3
- name: Gen_certs | Prepare tempfile for unpacking certs
  shell: mktemp /tmp/certsXXXXX.tar.gz
  register: cert_tempfile

- name: Gen_certs | Write master certs to tempfile
  copy:
    content: "{{master_cert_data.stdout}}"
    dest: "{{cert_tempfile.stdout}}"
    owner: root
    mode: "0600"
  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | Unpack certs on masters
  shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}"
  changed_when: false
  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]
  notify: set secret_changed

- name: Gen_certs | Cleanup tempfile
  file:
    path: "{{cert_tempfile.stdout}}"
    state: absent
  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | Copy certs on nodes
  shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
  shell: "base64 -d <<< '{{node_cert_data.stdout|quote}}' | tar xz -C {{ kube_cert_dir }}"
  args:
    executable: /bin/bash
  changed_when: false
  when: inventory_hostname in groups['kube-node'] and
        sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]
  notify: set secret_changed

- name: Gen_certs | check certificate permissions
  file:
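The gather/copy tasks above move certs between hosts as a single base64 line: a file list is fed to tar's -T via a bash herestring, the archive is base64-encoded, and the receiving side reverses it (the NOTE explains why masters additionally stage it through a tempfile: shell commands hit a ~200k character limit). The round trip in isolation, with newline-separated names and illustrative paths:

    printf 'ca.pem\nnode-node2.pem\n' \
      | tar czf - -C /etc/kubernetes/ssl -T /dev/stdin | base64 --wrap=0 > /tmp/certs.b64   # pack on first master
    base64 -d < /tmp/certs.b64 | tar xzf - -C /etc/kubernetes/ssl                           # unpack on target host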
@@ -79,6 +160,20 @@
    {%- endif %}
  tags: facts

- name: SSL CA directories | Set SSL CA directories
  set_fact:
    ssl_ca_dirs: "[
      {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
      '/usr/share/ca-certificates',
      {% elif ansible_os_family == 'RedHat' -%}
      '/etc/pki/tls',
      '/etc/pki/ca-trust',
      {% elif ansible_os_family == 'Debian' -%}
      '/usr/share/ca-certificates',
      {% endif -%}
      ]"
  tags: facts

- name: Gen_certs | add CA to trusted CA dir
  copy:
    src: "{{ kube_cert_dir }}/ca.pem"

@@ -35,6 +35,41 @@
  when: inventory_hostname in "{{ groups['kube-master'] }}"
  notify: set secret_changed

#
# The following directory-creation tasks make sure that the directories
# exist on the first master for cases where the first master isn't
# being run.
#
- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
  file:
    path: "{{ kube_config_dir }}"
    state: directory
    owner: kube
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
  when: gen_certs|default(false) or gen_tokens|default(false)

- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
  file:
    path: "{{ kube_script_dir }}"
    state: directory
    owner: kube
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  tags: [k8s-secrets, bootstrap-os]
  when: gen_certs|default(false) or gen_tokens|default(false)

- name: "Get_tokens | Make sure the tokens directory exists (on {{groups['kube-master'][0]}})"
  file:
    path={{ kube_token_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}
  run_once: yes
  delegate_to: "{{groups['kube-master'][0]}}"
  when: gen_tokens|default(false)

- include: gen_certs.yml
  tags: k8s-secrets
- include: gen_tokens.yml

@@ -193,7 +193,7 @@
- name: Calico | Disable node mesh
  shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
  when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false))
         and inventory_hostname in groups['kube-node']
         and inventory_hostname in groups['k8s-cluster']
         and not legacy_calicoctl)
  run_once: true

@@ -208,7 +208,7 @@
         | {{ bin_dir }}/calicoctl create -f -
  with_items: "{{ peers|default([]) }}"
  when: (not legacy_calicoctl and
         peer_with_router|default(false) and inventory_hostname in groups['kube-node'])
         peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'])

- name: Calico | Configure peering with route reflectors
  shell: >
@@ -223,13 +223,13 @@
         | {{ bin_dir }}/calicoctl create --skip-exists -f -
  with_items: "{{ groups['calico-rr'] | default([]) }}"
  when: (not legacy_calicoctl and
         peer_with_calico_rr|default(false) and inventory_hostname in groups['kube-node']
         peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
         and hostvars[item]['cluster_id'] == cluster_id)

- name: Calico (old) | Disable node mesh
  shell: "{{ bin_dir }}/calicoctl bgp node-mesh off"
  when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false))
         and inventory_hostname in groups['kube-node']
         and inventory_hostname in groups['k8s-cluster']
         and legacy_calicoctl)
  run_once: true

@@ -237,11 +237,11 @@
  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}"
  with_items: "{{ peers|default([]) }}"
  when: (legacy_calicoctl and
         peer_with_router|default(false) and inventory_hostname in groups['kube-node'])
         peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'])

- name: Calico (old) | Configure peering with route reflectors
  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip']) }} as {{ local_as | default(global_as_num) }}"
  with_items: "{{ groups['calico-rr'] | default([]) }}"
  when: (legacy_calicoctl and
         peer_with_calico_rr|default(false) and inventory_hostname in groups['kube-node']
         peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
         and hostvars[item]['cluster_id'] == cluster_id)

@@ -7,7 +7,7 @@ Wants=docker.socket
[Service]
User=root
PermissionsStartOnly=true
{% if inventory_hostname in groups['kube-node'] and peer_with_router|default(false)%}
{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%}
ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --as={{ local_as }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }}
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }}

@@ -7,9 +7,9 @@ CALICO_IP6=""
{% if calico_network_backend is defined %}
CALICO_NETWORKING_BACKEND="{{calico_network_backend }}"
{% endif %}
{% if inventory_hostname in groups['kube-node'] and peer_with_router|default(false)%}
{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%}
CALICO_AS="{{ local_as }}"
{% endif %}
CALICO_NO_DEFAULT_POOLS="true"
CALICO_LIBNETWORK_ENABLED="true"
CALICO_HOSTNAME="{{ inventory_hostname }}"
CALICO_HOSTNAME="{{ ansible_hostname }}"

@@ -1,7 +1,7 @@
{
  "name": "calico-k8s-network",
{% if not legacy_calicoctl %}
  "hostname": "{{ inventory_hostname }}",
  "hostname": "{{ ansible_hostname }}",
{% endif %}
  "type": "calico",
  "etcd_endpoints": "{{ etcd_access_endpoint }}",

@@ -14,6 +14,9 @@ canal_log_level: "info"
canal_cert_dir: /etc/canal/certs
etcd_cert_dir: /etc/ssl/etcd/ssl

# Canal Network Policy directory
canal_policy_dir: /etc/kubernetes/policy

# Limits for apps
calico_node_memory_limit: 500M
calico_node_cpu_limit: 200m
@@ -23,3 +26,8 @@ flannel_memory_limit: 500M
flannel_cpu_limit: 200m
flannel_memory_requests: 256M
flannel_cpu_requests: 100m
calicoctl_memory_limit: 170M
calicoctl_cpu_limit: 100m
calicoctl_memory_requests: 70M
calicoctl_cpu_requests: 50m

@@ -59,3 +59,17 @@
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  tags: [hyperkube, upgrade]

- name: Canal | Install calicoctl container script
  template:
    src: calicoctl-container.j2
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    owner: root
    group: root
  changed_when: false

- name: Canal | Create network policy directory
  file:
    path: "{{ canal_policy_dir }}"
    state: directory

roles/network_plugin/canal/templates/calicoctl-container.j2 (new file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash
{{ docker_bin_dir }}/docker run -i --privileged --rm \
  --net=host --pid=host \
  -e ETCD_ENDPOINTS={{ etcd_access_endpoint }} \
  -e ETCD_CA_CERT_FILE={{ canal_cert_dir }}/ca_cert.crt \
  -e ETCD_CERT_FILE={{ canal_cert_dir }}/cert.crt \
  -e ETCD_KEY_FILE={{ canal_cert_dir }}/key.pem \
  -v {{ docker_bin_dir }}/docker:{{ docker_bin_dir }}/docker \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/run/calico:/var/run/calico \
  -v {{ canal_cert_dir }}:{{ canal_cert_dir }}:ro \
  -v {{ canal_policy_dir }}:{{ canal_policy_dir }}:ro \
  --memory={{ calicoctl_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ calicoctl_cpu_limit|regex_replace('m', '') }} \
  {{ calicoctl_image_repo }}:{{ calicoctl_image_tag}} \
  $@
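With this wrapper installed as {{ bin_dir }}/calicoctl, operators and playbooks invoke calicoctl as if it were a local binary while it actually runs containerized, with the etcd endpoints and TLS material injected. For example, the same call the calico tasks above issue:

    calicoctl config set nodeToNodeMesh off   # docker flags are supplied by the wrapper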
@@ -1,4 +1,6 @@
---
# Limits
weave_memory_limit: 500M
weave_cpu_limit: 300m
weave_cpu_limit: 30m
weave_memory_requests: 300M
weave_cpu_requests: 10m

@@ -1,36 +0,0 @@
---
- name: restart weave
  command: /bin/true
  notify:
    - Weave | reload systemd
    - reload weave

- name : Weave | reload systemd
  shell: systemctl daemon-reload

- name: restart weaveproxy
  command: /bin/true
  notify:
    - Weave | reload systemd
    - reload weaveproxy

- name: restart weaveexpose
  command: /bin/true
  notify:
    - Weave | reload systemd
    - reload weaveexpose

- name: reload weave
  service:
    name: weave
    state: restarted

- name: reload weaveproxy
  service:
    name: weaveproxy
    state: restarted

- name: reload weaveexpose
  service:
    name: weaveexpose
    state: restarted
@@ -1,5 +1,8 @@
---
dependencies:
  - role: download
    file: "{{ downloads.weave }}"
    file: "{{ downloads.weave_kube }}"
    tags: download
  - role: download
    file: "{{ downloads.weave_npc }}"
    tags: download

@@ -8,46 +8,8 @@
  changed_when: false
  tags: [hyperkube, upgrade]

- name: Weave | Install weave
  command: rsync -piu "{{ local_release_dir }}/weave/bin/weave" "{{ bin_dir }}/weave"
  changed_when: false

- name: Weave | pull weave images
  shell: "{{ bin_dir }}/weave setup"
  changed_when: false

- name: Weave | set perms
  file: path="{{ bin_dir }}/weave" mode=0755 state=file

- name: Weave | Set options
- name: Weave | Create weave-net manifest
  template:
    src: weave.j2
    dest: "/etc/weave.env"
    owner: root
    group: root
    mode: 0644
  notify:
    - restart weave

- name: Weave | Write weave systemd init file
  template: src=weave.service.j2 dest=/etc/systemd/system/weave.service
  notify: restart weave

- name: Weave | Write weaveproxy systemd init file
  template: src=weaveproxy.service.j2 dest=/etc/systemd/system/weaveproxy.service
  notify: restart weaveproxy

- name: Weave | Write weaveexpose systemd init file
  template: src=weaveexpose.service.j2 dest=/etc/systemd/system/weaveexpose.service
  notify: restart weaveexpose

- meta: flush_handlers

- name: Weave | Enable weave
  service: name=weave enabled=yes state=started

- name: Weave | Enable weaveproxy
  service: name=weaveproxy enabled=yes state=started

- name: Weave | Enable weaveexpose
  service: name=weaveexpose enabled=yes state=started
    src: weave-net.yml.j2
    dest: "{{ kube_config_dir }}/weave-net.yml"
  register: weave_manifest

roles/network_plugin/weave/templates/weave-net.yml.j2 (new file, 104 lines)
@@ -0,0 +1,104 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: weave-net
  namespace: {{ system_namespace }}
  labels:
    version: {{ weave_version }}
spec:
  template:
    metadata:
      labels:
        name: weave-net
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      hostNetwork: true
      hostPID: true
      containers:
        - name: weave
          image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }}
          imagePullPolicy: Always
          command:
            - /home/weave/launch.sh
          env:
            - name: IPALLOC_RANGE
              value: {{ kube_pods_subnet }}
{% if weave_checkpoint_disable is defined %}
            - name: CHECKPOINT_DISABLE
              value: {{ weave_checkpoint_disable }}
{% endif %}
{% if weave_expect_npc is defined %}
            - name: EXPECT_NPC
              value: {{ weave_expect_npc }}
{% endif %}
{% if weave_kube_peers is defined %}
            - name: KUBE_PEERS
              value: {{ weave_kube_peers }}
{% endif %}
{% if weave_ipalloc_init is defined %}
            - name: IPALLOC_INIT
              value: {{ weave_ipalloc_init }}
{% endif %}
{% if weave_expose_ip is defined %}
            - name: WEAVE_EXPOSE_IP
              value: {{ weave_expose_ip }}
{% endif %}
          livenessProbe:
            initialDelaySeconds: 30
            httpGet:
              host: 127.0.0.1
              path: /status
              port: 6784
          securityContext:
            privileged: true
          volumeMounts:
            - name: weavedb
              mountPath: /weavedb
            - name: cni-bin
              mountPath: /opt
            - name: cni-bin2
              mountPath: /host_home
            - name: cni-conf
              mountPath: /etc
          resources:
            requests:
              cpu: {{ weave_cpu_requests }}
              memory: {{ weave_memory_requests }}
            limits:
              cpu: {{ weave_cpu_limit }}
              memory: {{ weave_memory_limit }}
        - name: weave-npc
          image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }}
          imagePullPolicy: Always
          resources:
            requests:
              cpu: {{ weave_cpu_requests }}
              memory: {{ weave_memory_requests }}
            limits:
              cpu: {{ weave_cpu_limit }}
              memory: {{ weave_memory_limit }}
          securityContext:
            privileged: true
      restartPolicy: Always
      volumes:
        - name: weavedb
          emptyDir: {}
        - name: cni-bin
          hostPath:
            path: /opt
        - name: cni-bin2
          hostPath:
            path: /home
        - name: cni-conf
          hostPath:
            path: /etc
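Once templated and applied through the kube module shown earlier, the DaemonSet can be checked by hand; a sketch assuming system_namespace resolves to kube-system:

    kubectl --namespace=kube-system get ds weave-net   # expect one pod per node
    curl -s http://127.0.0.1:6784/status               # the endpoint the wait task polls for 'Status: ready'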
@@ -1,7 +0,0 @@
WEAVE_DOCKER_ARGS="--memory={{ weave_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ weave_cpu_limit|regex_replace('m', '') }}"
WEAVE_PEERS="{% for host in groups['k8s-cluster'] %}{{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}{% if not loop.last %} {% endif %}{% endfor %}"
WEAVEPROXY_ARGS="--rewrite-inspect --without-dns"
WEAVE_SUBNET="--ipalloc-range {{ kube_pods_subnet }}"
{% if weave_password is defined %}
WEAVE_PASSWORD="{{ weave_password }}"
{% endif %}
@@ -1,18 +0,0 @@
[Unit]
Description=Weave Network
Documentation=http://docs.weave.works/weave/latest_release/
Wants=docker.socket
After=docker.service docker.socket

[Service]
EnvironmentFile=-/etc/weave.env
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f weave
ExecStartPre={{ bin_dir }}/weave launch-router \
  $WEAVE_SUBNET \
  $WEAVE_PEERS
ExecStart={{ docker_bin_dir }}/docker attach weave
ExecStop={{ bin_dir }}/weave stop
Restart=on-failure

[Install]
WantedBy=multi-user.target
@@ -1,16 +0,0 @@
[Unit]
Documentation=http://docs.weave.works/
Wants=docker.socket weave.service
After=docker.service docker.socket weave.service

[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStartSec=0
EnvironmentFile=-/etc/weave.%H.env
EnvironmentFile=-/etc/weave.env
ExecStart={{ bin_dir }}/weave expose
ExecStop={{ bin_dir }}/weave hide

[Install]
WantedBy=weave-network.target
@@ -1,17 +0,0 @@
[Unit]
Description=Weave proxy for Docker API
Documentation=http://docs.weave.works/
Wants=docker.socket
After=docker.service docker.socket

[Service]
EnvironmentFile=-/etc/weave.%H.env
EnvironmentFile=-/etc/weave.env
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f weaveproxy
ExecStartPre={{ bin_dir }}/weave launch-proxy $WEAVEPROXY_ARGS
ExecStart={{ docker_bin_dir }}/docker attach weaveproxy
Restart=on-failure
ExecStop={{ bin_dir }}/weave stop-proxy

[Install]
WantedBy=weave-network.target
scripts/.premoderator.sh.swp (new binary file, not shown)
@@ -11,5 +11,8 @@ issue=$(echo ${CI_BUILD_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1')
user=$(curl ${CURL_ARGS} "https://api.github.com/repos/kubernetes-incubator/kargo/issues/${issue}/comments" \
  | jq -M "map(select(.body | contains (\"$MAGIC\"))) | .[0] .user.login" | tr -d '"')
# Check for the required user group membership to allow (exit 0) or decline (exit >0) the pipeline
[ "$user" != "null" ] || exit 1
if [ "$user" = "null" ]; then
  echo "User does not have permissions to start CI run"
  exit 1
fi
curl ${CURL_ARGS} "https://api.github.com/orgs/kubernetes-incubator/members/${user}"

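The final curl relies on the GitHub org-membership endpoint returning success only for members, so the request's exit status gates the pipeline; this assumes CURL_ARGS (defined earlier in the script) includes a fail-on-HTTP-error flag, roughly:

    # rc 0 only when $user is a kubernetes-incubator member; -f turns the 404 into a non-zero exit
    curl -sSf "https://api.github.com/orgs/kubernetes-incubator/members/${user}" > /dev/null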
@@ -1,5 +1,5 @@
---
- hosts: node1
- hosts: kube-master[0]

  vars:
    test_image_repo: busybox
@@ -16,11 +16,14 @@
    bin_dir: "/usr/local/bin"
  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

- name: Run a replica controller composed of 2 pods
  shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --replicas=2 --command -- tail -f /dev/null"
- name: Create test namespace
  shell: "{{bin_dir}}/kubectl create namespace test"

- name: Run a replica controller composed of 2 pods in test ns
  shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --namespace test --replicas=2 --command -- tail -f /dev/null"

- name: Pods are running
  shell: "{{bin_dir}}/kubectl get pods --no-headers -o json"
  shell: "{{bin_dir}}/kubectl get pods --namespace test --no-headers -o json"
  register: run_pods_log
  until: [ '(run_pods_log.stdout | from_json)["items"] | map(attribute = "status.phase") | join(",") == "Running,Running"' ]
  retries: 18

@@ -1,5 +1,5 @@
---
- hosts: node1
- hosts: kube-master[0]

  tasks:

@@ -1,5 +0,0 @@
---
- hosts: all
  gather_facts: False
  roles:
    - ubuntu-bootstrap