Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-13 21:34:40 +03:00)

Compare commits (239 commits): release-2.… ... v2.8.5
Commit SHA1s, in the order listed:

6f97687d19  447605ca0e  3901480bc1  c42cb8f9b2  5c28bb0679  6d53229986  1e57d2e21a  ea41fc5e74
4167807f17  2ac1c7562f  2d6e31d281  0a19d1bf01  432f8e9841  19792cfae7  9463b70edd  b109f52dab
e0781483fa  ffcea384a6  deff6a82fa  919a268de3  487cfa5e6c  5fcda86f8c  f2635776cd  d30dbdde23
b59d5c35bc  8331f7b056  92274a74f7  1739c479ed  551317f1cd  ddc19f43ba  993b8e2791  9a5438ce2f
d33434647b  02169e8f85  b07e93e08b  bad886ca9b  967a042321  a585318b1a  07d2f1aa36  b15e685a0b
885c6cff71  c5e425b02b  3fa81bb86e  5daadc022d  0cfcd39d55  7875c38023  edfec26988  daa290100c
b4eb25197b  ac00d23b80  9ae2eefb9a  8c18f053aa  2aefa25448  6e01c1e377  3c6ee19785  0e2d3fb923
af5e05d08d  c83bfc9df6  1ebb670141  9d0786cbb0  53bde23a5e  187798086a  1540bc9759  618ab93b42
5ba67c55a2  d8ad9aedad  3e6d0a50e8  ff09141a14  d188876a91  6f6274d0d9  29ee581067  17f07e2613
9ebdf0e3cf  730caa3d58  7deb842030  5f7d5e1e80  931c76e58f  3fafa583d1  d8e9b0f675  846c7a26e8
98d766c68e  087b7fa38e  92877e8bf8  d3ef41b603  633bfa7ebc  13af4c1f40  afc3f7dce4  e8901a2422
c888de8b38  fefa1670a6  589d22da0b  3dcb914607  b2b421840c  5c7eef70b4  c2710899ed  b997912ebe
e5d07f3a3d  fb9155c450  dc3195310c  9f7c2b08a5  a6932b6b81  77d705ca9f  89ac53acd7  1e22c83f0f
1ad1e80ae3  bc785196c8  dfdf530723  33f33a7358  113dd2146a  14c2df0418  b316518864  612663667c
6c14f35f00  289be0a0db  a4de023c29  e3fdd4a0ac  bc9e14a762  3c5f20190f  9c83551a0e  99c139dd5a
6c34745958  2ba4e9bda5  2a00c931e4  1e6ad5acb6  bc74a37696  0cb326b10f  4daa9aa443  667364143c
d8b357ce49  479d0e858d  152c15b19f  ce5a34d86c  b8bafb2893  5da18854a3  d269e7f46c  8c636f67af
a84508d6b9  22c234040e  4a1be18361  3b6df70f11  48390d37c2  0d3beb4e5a  6e192d487b  306c61a968
58b4fea2b1  2149bfbc5b  0acb823d96  8ba6b601b0  06f981ffed  4a4a3f759c  8fbebf4e83  8371beb915
b39b32a48c  dbe99b59a7  3cc413fe9a  59d0138bcd  801bbcbc63  4560ff7386  477841d8c0  a89dc49c52
90d8f7aa6a  abc1421def  7abd4eeafd  27c79088e6  ce2a3a80db  79bf74e90f  4f12ba00d1  93104d9224
b5f4a79365  7e84de2ae1  06e1f81801  ccc3f89060  8a17de327e  3b787123e3  127969d65f  4b711e29ef
2a3aa591e0  56cafc3fb3  b434456f54  6923d350f4  38beab8fe8  4bdd0ce417  e5c4e1ecc3  a48131f1e1
6e34918b52  66fddb2d52  87193fd270  52b5309385  7f4e048052  7fe7357154  635261eb12  5a5cf15c04
4d2b6b71f2  7bec169d58  bfd4ccbeaa  76fe84fe93  cf4dd645a7  a5edd0d709  c33e08c3fa  9b773185c3
b1974ab3cf  b4e2b85745  fcd8d850dc  6549b8f8ae  1ea7ec3189  4077934519  fac8aaa44e  fd422a0646
d7bb4d954a  36322901a6  31d8fc086b  9ca583d984  3ce933051a  3b750cafc1  1911fe5ca8  2117e8167d
c66d1ad6cb  bd0383a4e3  dd5327ef9e  cdce8c81da  3f786542d3  e813b26963  abe711dcb5  b35a9fcb04
c27a91f7f0  2ab2f3a0a3  c825f4d180  7e195b06a6  30132d8c35  0d89db5141  4b7d59224d  72157a7514
4f51607145  6602760a48  9232261665  af97febb04  c818dc1ce8  ad50f376a5  83838b7fbc
140  .gitlab-ci.yml

@@ -7,7 +7,7 @@ stages:
variables:
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  # DOCKER_HOST: tcp://localhost:2375
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"

@@ -42,7 +42,7 @@ before_script:
  tags:
    - kubernetes
    - docker
  image: quay.io/kubespray/kubespray:latest
  image: quay.io/kubespray/kubespray:v2.7

.docker_service: &docker_service
  services:

@@ -93,10 +93,10 @@ before_script:
    # Check out latest tag if testing upgrade
    # Uncomment when gitlab kubespray repo has tags
    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
    - test "${UPGRADE_TEST}" != "false" && git checkout 8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba
    - test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
    # Checkout the CI vars file so it is available
    - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
    # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
    # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
    - 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'

@@ -244,10 +244,6 @@ before_script:
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
  # stage: deploy-part1
  UPGRADE_TEST: "graceful"

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
  # stage: deploy-part1
  UPGRADE_TEST: "graceful"

@@ -256,6 +252,10 @@ before_script:
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

@@ -276,7 +276,7 @@ before_script:
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.debian8_calico_variables: &debian8_calico_variables
.debian9_calico_variables: &debian9_calico_variables
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

@@ -296,23 +296,31 @@ before_script:
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.centos7_kube_router_variables: &centos7_kube_router_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.centos7_multus_calico_variables: &centos7_multus_calico_variables
  # stage: deploy-part2
  UPGRADE_TEST: "graceful"

.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.coreos_kube_router_variables: &coreos_kube_router_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
  # stage: deploy-part1
.ubuntu_flannel_variables: &ubuntu_flannel_variables
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.coreos_vault_upgrade_variables: &coreos_vault_upgrade_variables
  # stage: deploy-part1
  UPGRADE_TEST: "basic"

.ubuntu_flannel_variables: &ubuntu_flannel_variables
.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

@@ -323,12 +331,13 @@ before_script:

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1
gce_coreos-calico-aio:
gce_ubuntu18-flannel-aio:
  stage: deploy-part1
  <<: *job
  <<: *gce
  variables:
    <<: *coreos_calico_aio_variables
    <<: *ubuntu18_flannel_aio_variables
    <<: *gce_variables
  when: on_success
  except: ['triggers']

@@ -336,14 +345,14 @@ gce_coreos-calico-aio:

### PR JOBS PART2

gce_ubuntu18-flannel-aio:
gce_coreos-calico-aio:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *ubuntu18_flannel_aio_variables
    <<: *coreos_calico_aio_variables
    <<: *gce_variables
  when: manual
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

@@ -358,7 +367,7 @@ gce_centos7-flannel-addons:
  except: ['triggers']
  only: [/^pr-.*$/]

gce_centos-weave-kubeadm:
gce_centos-weave-kubeadm-sep:
  stage: deploy-part2
  <<: *job
  <<: *gce

@@ -369,6 +378,19 @@ gce_centos-weave-kubeadm:
  except: ['triggers']
  only: [/^pr-.*$/]

gce_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_flannel_variables
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

### MANUAL JOBS

gce_ubuntu-weave-sep:
  stage: deploy-part2
  <<: *job

@@ -376,11 +398,10 @@ gce_ubuntu-weave-sep:
  variables:
    <<: *gce_variables
    <<: *ubuntu_weave_sep_variables
  when: on_success
  when: manual
  except: ['triggers']
  only: [/^pr-.*$/]

### MANUAL JOBS
gce_coreos-calico-sep-triggers:
  stage: deploy-part2
  <<: *job

@@ -392,7 +413,7 @@ gce_coreos-calico-sep-triggers:
  only: ['triggers']

gce_ubuntu-canal-ha-triggers:
  stage: deploy-part2
  stage: deploy-special
  <<: *job
  <<: *gce
  variables:

@@ -434,7 +455,7 @@ do_ubuntu-canal-ha:
  only: ['master', /^pr-.*$/]

gce_ubuntu-canal-ha:
  stage: deploy-part2
  stage: deploy-special
  <<: *job
  <<: *gce
  variables:

@@ -529,24 +550,24 @@ gce_rhel7-weave-triggers:
  when: on_success
  only: ['triggers']

gce_debian8-calico-upgrade:
gce_debian9-calico-upgrade:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *debian8_calico_variables
    <<: *debian9_calico_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_debian8-calico-triggers:
gce_debian9-calico-triggers:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *debian8_calico_variables
    <<: *debian9_calico_variables
  when: on_success
  only: ['triggers']

@@ -613,6 +634,28 @@ gce_centos7-calico-ha-triggers:
  when: on_success
  only: ['triggers']

gce_centos7-kube-router:
  stage: deploy-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_kube_router_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_centos7-multus-calico:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_multus_calico_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_opensuse-canal:
  stage: deploy-part2
  <<: *job

@@ -636,6 +679,17 @@ gce_coreos-alpha-weave-ha:
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_coreos-kube-router:
  stage: deploy-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_kube_router_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-rkt-sep:
  stage: deploy-part2
  <<: *job

@@ -647,35 +701,13 @@ gce_ubuntu-rkt-sep:
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-vault-sep:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_vault_sep_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_coreos-vault-upgrade:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_vault_upgrade_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-flannel-sep:
gce_ubuntu-kube-router-sep:
  stage: deploy-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_flannel_variables
    <<: *ubuntu_kube_router_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
12  Dockerfile

@@ -6,11 +6,11 @@ RUN apt update -y && \
    apt install -y \
    libssl-dev python-dev sshpass apt-transport-https \
    ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
5  Makefile (new file)

@@ -0,0 +1,5 @@
mitogen:
	ansible-playbook -c local mitogen.yaml -vv
clean:
	rm -rf dist/
	rm *.retry
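The new targets are run from the repository root; `mitogen` fetches the Mitogen strategy plugin that the `ansible.cfg` hunk further down points at, and `clean` removes build leftovers. A usage sketch:

~~~
# Fetch/install the Mitogen strategy plugin, then clean up artifacts
make mitogen
make clean
~~~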
57  README.md

@@ -1,4 +1,4 @@
(Kubernetes logo image)

Deploy a Production Ready Kubernetes Cluster
============================================

@@ -17,13 +17,22 @@ Quick Start

To deploy the cluster you can use:

### Current release

2.8.2

### Ansible

#### Ansible version

Ansible v2.7.0 fails and/or produces unexpected results due to [ansible/ansible#46600](https://github.com/ansible/ansible/issues/46600).

#### Usage

    # Install dependencies from ``requirements.txt``
    sudo pip install -r requirements.txt

    # Copy ``inventory/sample`` as ``inventory/mycluster``
    cp -rfp inventory/sample/* inventory/mycluster
    cp -rfp inventory/sample inventory/mycluster

    # Update Ansible inventory file with inventory builder
    declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)

@@ -33,8 +42,11 @@ To deploy the cluster you can use :
    cat inventory/mycluster/group_vars/all/all.yml
    cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

    # Deploy Kubespray with Ansible Playbook
    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
    # Deploy Kubespray with Ansible Playbook - run the playbook as root
    # The option `-b` is required, as for example writing SSL keys in /etc/,
    # installing packages and interacting with various systemd daemons.
    # Without -b the playbook will fail to run!
    ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root cluster.yml

Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, the `ansible-playbook` command will fail with:

@@ -89,7 +101,7 @@ Supported Linux Distributions
-----------------------------

- **Container Linux by CoreOS**
- **Debian** Jessie, Stretch, Wheezy
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7
- **Fedora** 28

@@ -102,25 +114,27 @@ Supported Components
--------------------

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.11.3
  - [etcd](https://github.com/coreos/etcd) v3.2.18
  - [docker](https://www.docker.com/) v17.03 (see note)
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.12.7
  - [etcd](https://github.com/coreos/etcd) v3.2.24
  - [docker](https://www.docker.com/) v18.06 (see note)
  - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
  - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin
  - [calico](https://github.com/projectcalico/calico) v3.1.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.2.0
  - [contiv](https://github.com/contiv/install) v1.1.7
  - [cilium](https://github.com/cilium/cilium) v1.3.0
  - [contiv](https://github.com/contiv/install) v1.2.1
  - [flanneld](https://github.com/coreos/flannel) v0.10.0
  - [weave](https://github.com/weaveworks/weave) v2.4.1
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.1
  - [multus](https://github.com/intel/multus-cni) v3.1.autoconf
  - [weave](https://github.com/weaveworks/weave) v2.5.0
- Application
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
  - [coredns](https://github.com/coredns/coredns) v1.2.2
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
  - [coredns](https://github.com/coredns/coredns) v1.2.6
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0

Note: kubernetes doesn't support newer docker versions ("Version 17.03 is recommended... Versions 17.06+ might work, but have not yet been tested and verified by the Kubernetes node team" cf. [Bootstrapping Clusters with kubeadm](https://kubernetes.io/docs/setup/independent/install-kubeadm/#installing-docker)). Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.

Note 2: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network

@@ -130,10 +144,10 @@ plugins can be deployed for a given single cluster.
Requirements
------------

- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine that will run Ansible commands**
- **Ansible v2.5 (or newer) and python-netaddr is installed on the machine that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images.
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (see [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment)).
- The target servers are configured to allow **IPv4 forwarding**.
- **Your ssh key must be copied** to all the servers that are part of your inventory.
- The **firewalls are not managed**; you'll need to implement your own rules as you usually do.

@@ -161,6 +175,13 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to the `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).

- [kube-router](docs/kube-router.md): Kube-router is an L3 CNI for Kubernetes networking aiming to provide operational
  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy),
  iptables for network policies, and BGP for pods L3 networking (optionally with BGP peering with out-of-cluster BGP peers).
  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
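For instance, to pick a plugin other than the default, set the variable in your cluster's group vars or override it on the command line (a sketch reusing the Quick Start inventory path from above):

~~~
# Persistent: set in inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
#   kube_network_plugin: weave
# One-off override at deploy time:
ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root \
    cluster.yml -e kube_network_plugin=weave
~~~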
@@ -178,7 +199,7 @@ Tools and projects on top of Kubespray

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)

CI Tests
--------
153  Vagrantfile (vendored)

@@ -1,6 +1,8 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :

# For help on using kubespray with vagrant, check out docs/vagrant.md

require 'fileutils'

Vagrant.require_version ">= 2.0.0"

@@ -13,14 +15,16 @@ COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd
DISK_UUID = Time.now.utc.to_i

SUPPORTED_OS = {
  "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
  "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
  "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
  "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
  "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
  "fedora" => {box: "fedora/28-cloud-base", bootstrap_os: "fedora", user: "vagrant"},
  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
  "coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
  "coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
  "coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
  "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
}

# Defaults for config options defined in CONFIG

@@ -32,8 +36,10 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$os = "ubuntu"
$os = "ubuntu1804"
$network_plugin = "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking = false
# The first three nodes are etcd servers
$etcd_instances = $num_instances
# The first two nodes are kube masters

@@ -47,8 +53,6 @@ $kube_node_instances_with_disks_number = 2

$playbook = "cluster.yml"

$local_release_dir = "/vagrant/temp"

host_vars = {}

if File.exist?(CONFIG)

@@ -57,13 +61,13 @@ end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
$inventory = "inventory/sample" if ! $inventory
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

# if $inventory has a hosts file use it, otherwise copy over vars etc
# to where vagrant expects dynamic inventory to be.
if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant",
                               "provisioners", "ansible")
# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))

@@ -78,80 +82,60 @@ if Vagrant.has_plugin?("vagrant-proxyconf")
end

Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false

  config.vm.box = $box
  if SUPPORTED_OS[$os].has_key? :box_url
    config.vm.box_url = SUPPORTED_OS[$os][:box_url]
  end
  config.ssh.username = SUPPORTED_OS[$os][:user]

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  # always use Vagrants insecure key
  config.ssh.insert_key = false

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
      config.vm.hostname = vm_name
    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|

      node.vm.hostname = vm_name

      if Vagrant.has_plugin?("vagrant-proxyconf")
        config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
        config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
        config.proxy.no_proxy = $no_proxy
      end

      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
        node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
        node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
        node.proxy.no_proxy = $no_proxy
      end

      ["vmware_fusion", "vmware_workstation"].each do |vmware|
        config.vm.provider vmware do |v|
        node.vm.provider vmware do |v|
          v.vmx['memsize'] = $vm_memory
          v.vmx['numvcpus'] = $vm_cpus
        end
      end

      config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']

      $shared_folders.each do |src, dst|
        config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
      end

      config.vm.provider :virtualbox do |vb|
        vb.gui = $vm_gui
      node.vm.provider :virtualbox do |vb|
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
        vb.gui = $vm_gui
        vb.linked_clone = true
      end

      config.vm.provider :libvirt do |lv|
        lv.memory = $vm_memory
        # Fix kernel panic on fedora 28
        if $os == "fedora"
          lv.cpu_mode = "host-passthrough"
        end
      end

      ip = "#{$subnet}.#{i+100}"
      host_vars[vm_name] = {
        "ip": ip,
        "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
        "local_release_dir" => $local_release_dir,
        "download_run_once": "False",
        "kube_network_plugin": $network_plugin
      }

      config.vm.network :private_network, ip: ip

      # Disable swap for each vm
      config.vm.provision "shell", inline: "swapoff -a"
      node.vm.provider :libvirt do |lv|
        lv.memory = $vm_memory
        lv.cpus = $vm_cpus
        lv.default_prefix = 'kubespray'
        # Fix kernel panic on fedora 28
        if $os == "fedora"
          lv.cpu_mode = "host-passthrough"
        end
      end

      if $kube_node_instances_with_disks
        # Libvirt
        driverletters = ('a'..'z').to_a
        config.vm.provider :libvirt do |lv|
        node.vm.provider :libvirt do |lv|
          # always make /dev/sd{a/b/c} so that CI can ensure that
          # virtualbox and libvirt will have the same devices to use for OSDs
          (1..$kube_node_instances_with_disks_number).each do |d|

@@ -160,24 +144,51 @@ Vagrant.configure("2") do |config|
        end
      end

      # Only execute once the Ansible provisioner,
      # when all the machines are up and ready.
      if $expose_docker_tcp
        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
      end

      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'], rsync__exclude: ['.git','venv']
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
      end

      ip = "#{$subnet}.#{i+100}"
      node.vm.network :private_network, ip: ip

      # Disable swap for each vm
      node.vm.provision "shell", inline: "swapoff -a"

      host_vars[vm_name] = {
        "ip": ip,
        "kube_network_plugin": $network_plugin,
        "kube_network_plugin_multus": $multi_networking,
        "docker_keepcache": "1",
        "download_run_once": "True",
        "download_localhost": "False"
      }

      # Only execute the Ansible provisioner once, when all the machines are up and ready.
      if i == $num_instances
        config.vm.provision "ansible" do |ansible|
        node.vm.provision "ansible" do |ansible|
          ansible.playbook = $playbook
          if File.exist?(File.join(File.dirname($inventory), "hosts"))
          if File.exist?(File.join($inventory, "hosts.ini"))
            ansible.inventory_path = $inventory
          end
          ansible.become = true
          ansible.limit = "all"
          ansible.host_key_checking = false
          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "--ask-become-pass"]
          ansible.host_vars = host_vars
          #ansible.tags = ['download']
          ansible.groups = {
            "etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
            "kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
            "kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
            "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
            "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
            "k8s-cluster:children" => ["kube-master", "kube-node"],
          }
        end
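To try one of the other SUPPORTED_OS boxes, override the defaults before `vagrant up`; the `vagrant/config.rb` path below is the CONFIG file convention assumed here (see docs/vagrant.md):

~~~
# Sketch: select a different box and cluster size (config file path is an assumption)
mkdir -p vagrant
cat >> vagrant/config.rb <<'EOF'
$os = "centos-bento"
$num_instances = 3
EOF
vagrant up
~~~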
@@ -3,6 +3,8 @@ pipelining=True
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy

host_key_checking=False
gathering = smart
fact_caching = jsonfile
52  cluster.yml

@@ -1,5 +1,33 @@
---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: "Check ansible version !=2.7.0"
      assert:
        msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed"
        that:
          - ansible_version.string is version("2.7.0", "!=")
          - ansible_version.string is version("2.5.0", ">=")
      tags:
        - check
  vars:
    ansible_connection: local
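Before a long run you can verify the same constraint by hand; the play above enforces it anyway (it requires Ansible >= 2.5.0 and != 2.7.0):

~~~
# Quick pre-flight check of the control machine's Ansible version
ansible --version | head -n 1
~~~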
- hosts: localhost
  gather_facts: false
  tasks:
    - name: deploy warning for non kubeadm
      debug:
        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
      when: not kubeadm_enabled and not skip_non_kubeadm_warning

    - name: deploy cluster for non kubeadm
      pause:
        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
        echo: no
      when: not kubeadm_enabled and not skip_non_kubeadm_warning

- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults}

@@ -33,21 +61,10 @@
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker, when: container_manager == 'docker' }
    - { role: cri-o, tags: crio, when: container_manager == 'crio' }
    - role: rkt
      tags: rkt
      when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
    - { role: download, tags: download, when: "not skip_downloads" }
  environment: "{{proxy_env}}"

- hosts: etcd:k8s-cluster:vault:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults, when: "cert_management == 'vault'" }
    - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
  environment: "{{proxy_env}}"

- hosts: etcd
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:

@@ -60,13 +77,6 @@
    - { role: kubespray-defaults}
    - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }

- hosts: etcd:k8s-cluster:vault:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: vault, tags: vault, when: "cert_management == 'vault'"}
  environment: "{{proxy_env}}"

- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:

@@ -94,7 +104,7 @@
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
    - { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

@@ -116,7 +126,7 @@
  roles:
    - { role: kubespray-defaults}
    - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
  environment: "{{proxy_env}}"

- hosts: kube-master
@@ -1,5 +1,6 @@
#!/usr/bin/env python

from __future__ import print_function
import boto3
import os
import argparse

@@ -13,7 +14,7 @@ class SearchEC2Tags(object):
    self.search_tags()
    if self.args.host:
      data = {}
      print json.dumps(data, indent=2)
      print(json.dumps(data, indent=2))

  def parse_args(self):

@@ -44,18 +45,29 @@ class SearchEC2Tags(object):

    instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
    for instance in instances:

      ##Suppose default vpc_visibility is private
      dns_name = instance.private_dns_name
      ansible_host = {
        'ansible_ssh_host': instance.private_ip_address
      }

      ##Override when vpc_visibility actually is public
      if self.vpc_visibility == "public":
        hosts[group].append(instance.public_dns_name)
        hosts['_meta']['hostvars'][instance.public_dns_name] = {
          'ansible_ssh_host': instance.public_ip_address
        }
      else:
        hosts[group].append(instance.private_dns_name)
        hosts['_meta']['hostvars'][instance.private_dns_name] = {
          'ansible_ssh_host': instance.private_ip_address
        dns_name = instance.public_dns_name
        ansible_host = {
          'ansible_ssh_host': instance.public_ip_address
        }

      ##Set when instance actually has node_labels
      node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags))
      if node_labels_tag:
        ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])

      hosts[group].append(dns_name)
      hosts['_meta']['hostvars'][dns_name] = ansible_host

    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
    print json.dumps(hosts, sort_keys=True, indent=2)
    print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
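The new `kubespray-node-labels` handling expects the tag value to be a comma-separated list of `key=value` pairs, which the inventory exposes as `node_labels` host vars. A hypothetical tagging command (instance ID and labels are placeholders):

~~~
# Attach labels that the dynamic inventory above will pick up
aws ec2 create-tags --resources i-0123456789abcdef0 \
    --tags '[{"Key": "kubespray-node-labels", "Value": "nodetype=storage,disk=ssd"}]'
~~~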
@@ -1,5 +1,5 @@
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
# this name must be globally unique - it will be used as a prefix for azure components
cluster_name: example

@@ -7,6 +7,10 @@ cluster_name: example
# node that can be used to access the masters and minions
use_bastion: false

# Set this to a preferred name that will be used as the first part of the dns name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
# bastion_domain_prefix: k8s-bastion

number_of_k8s_masters: 3
number_of_k8s_nodes: 3

@@ -20,7 +24,8 @@ admin_username: devops
admin_password: changeme

# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
ssh_public_keys:
  - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"

# Disable ssh login with password. Change it to false to allow connecting over ssh with a password
disablePasswordAuthentication: true

@@ -1,5 +1,5 @@
{% for vm in vm_ip_list %}
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}

@@ -15,7 +15,12 @@
"name": "{{bastionIPAddressName}}",
"location": "[resourceGroup().location]",
"properties": {
  "publicIPAllocationMethod": "Static"
  "publicIPAllocationMethod": "Static",
  "dnsSettings": {
    {% if bastion_domain_prefix %}
    "domainNameLabel": "{{ bastion_domain_prefix }}"
    {% endif %}
  }
}
},
{

@@ -66,10 +71,12 @@
"disablePasswordAuthentication": "true",
"ssh": {
  "publicKeys": [
    {% for key in ssh_public_keys %}
    {
      "path": "{{sshKeyPath}}",
      "keyData": "{{ssh_public_key}}"
    }
      "keyData": "{{key}}"
    }{% if loop.index < ssh_public_keys | length %},{% endif %}
    {% endfor %}
  ]
}
}

@@ -162,10 +162,12 @@
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
  "publicKeys": [
    {% for key in ssh_public_keys %}
    {
      "path": "{{sshKeyPath}}",
      "keyData": "{{ssh_public_key}}"
    }
      "keyData": "{{key}}"
    }{% if loop.index < ssh_public_keys | length %},{% endif %}
    {% endfor %}
  ]
}
}

@@ -79,10 +79,12 @@
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
  "publicKeys": [
    {% for key in ssh_public_keys %}
    {
      "path": "{{sshKeyPath}}",
      "keyData": "{{ssh_public_key}}"
    }
      "keyData": "{{key}}"
    }{% if loop.index < ssh_public_keys | length %},{% endif %}
    {% endfor %}
  ]
}
}
176  contrib/dind/README.md (new file)

@@ -0,0 +1,176 @@
# Kubespray DIND experimental setup

This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).

The playbook has two roles:
- dind-host: creates the "nodes" as containers on localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
- dind-cluster: customizes each node container to have the required
  system packages installed, and some utils (swapoff, lsattr)
  symlinked to /bin/true to ease mimicking a real node.

This playbook has been tested with Ubuntu 16.04 as the host and ubuntu:16.04
as the docker image (note that dind-cluster has specific customization
for these images).

The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (wraps up running `contrib/inventory_builder/inventory.py` with
the node containers' IPs and prefix); a sketch of it is shown below.
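The generated helper is roughly of the following shape. This is illustrative only: the IPs are examples, the `HOST_PREFIX` override for `inventory.py` is an assumption, and the dind-host role writes the real file for you with the actual container IPs:

~~~
#!/bin/bash
# Hypothetical contents of /tmp/kubespray.dind.inventory_builder.sh
# (example IPs; HOST_PREFIX support in inventory.py is assumed here)
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py \
    172.17.0.2 172.17.0.3 172.17.0.4 172.17.0.5 172.17.0.6
~~~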
## Deploying

See below for a complete successful run:

1. Create the node containers

~~~~
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt

ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
~~~~

NOTE: if the playbook run fails with an error message like the one below,
you may need to specifically set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.

~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
~~~

2. Customize kubespray-dind.yaml

Note that there's coupling between the node containers created above
and the `kubespray-dind.yaml` settings, in particular regarding the selected `node_distro`
(as set in `group_vars/all/all.yaml`) and the docker settings.

~~~
$EDITOR contrib/dind/kubespray-dind.yaml
~~~

3. Prepare the inventory and run the playbook

~~~
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
~~~

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per the command lines below,
replacing `DISTRO` with either `debian`, `ubuntu`, `centos`, or `fedora`:

~~~
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
~~~

## Resulting deployment

See below to get an idea of what a completed deployment looks like,
from the host where you ran the kubespray playbooks.

### node_distro: debian

Running from an Ubuntu Xenial host:

~~~
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

$ docker ps
CONTAINER ID   IMAGE        COMMAND                  CREATED          STATUS          PORTS   NAMES
1835dd183b75   debian:9.5   "sh -c 'apt-get -qy …"   43 minutes ago   Up 43 minutes           kube-node5
30b0af8d2924   debian:9.5   "sh -c 'apt-get -qy …"   43 minutes ago   Up 43 minutes           kube-node4
3e0d1510c62f   debian:9.5   "sh -c 'apt-get -qy …"   43 minutes ago   Up 43 minutes           kube-node3
738993566f94   debian:9.5   "sh -c 'apt-get -qy …"   44 minutes ago   Up 44 minutes           kube-node2
c581ef662ed2   debian:9.5   "sh -c 'apt-get -qy …"   44 minutes ago   Up 44 minutes           kube-node1

$ docker exec kube-node1 kubectl get node
NAME         STATUS   ROLES         AGE   VERSION
kube-node1   Ready    master,node   18m   v1.12.1
kube-node2   Ready    master,node   17m   v1.12.1
kube-node3   Ready    node          17m   v1.12.1
kube-node4   Ready    node          17m   v1.12.1
kube-node5   Ready    node          17m   v1.12.1

$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE
default       netchecker-agent-67489                  1/1     Running   0          2m51s
default       netchecker-agent-6qq6s                  1/1     Running   0          2m51s
default       netchecker-agent-fsw92                  1/1     Running   0          2m51s
default       netchecker-agent-fw6tl                  1/1     Running   0          2m51s
default       netchecker-agent-hostnet-8f2zb          1/1     Running   0          3m
default       netchecker-agent-hostnet-gq7ml          1/1     Running   0          3m
default       netchecker-agent-hostnet-jfkgv          1/1     Running   0          3m
default       netchecker-agent-hostnet-kwfwx          1/1     Running   0          3m
default       netchecker-agent-hostnet-r46nm          1/1     Running   0          3m
default       netchecker-agent-lxdrn                  1/1     Running   0          2m51s
default       netchecker-server-864bd4c897-9vstl      1/1     Running   0          2m40s
default       sh-68fcc6db45-qf55h                     1/1     Running   1          12m
kube-system   coredns-7598f59475-6vknq                1/1     Running   0          14m
kube-system   coredns-7598f59475-l5q5x                1/1     Running   0          14m
kube-system   kube-apiserver-kube-node1               1/1     Running   0          17m
kube-system   kube-apiserver-kube-node2               1/1     Running   0          18m
kube-system   kube-controller-manager-kube-node1      1/1     Running   0          18m
kube-system   kube-controller-manager-kube-node2      1/1     Running   0          18m
kube-system   kube-proxy-5xx9d                        1/1     Running   0          17m
kube-system   kube-proxy-cdqq4                        1/1     Running   0          17m
kube-system   kube-proxy-n64ls                        1/1     Running   0          17m
kube-system   kube-proxy-pswmj                        1/1     Running   0          18m
kube-system   kube-proxy-x89qw                        1/1     Running   0          18m
kube-system   kube-scheduler-kube-node1               1/1     Running   4          17m
kube-system   kube-scheduler-kube-node2               1/1     Running   4          18m
kube-system   kubernetes-dashboard-5db4d9f45f-548rl   1/1     Running   0          14m
kube-system   nginx-proxy-kube-node3                  1/1     Running   4          17m
kube-system   nginx-proxy-kube-node4                  1/1     Running   4          17m
kube-system   nginx-proxy-kube-node5                  1/1     Running   4          17m
kube-system   weave-net-42bfr                         2/2     Running   0          16m
kube-system   weave-net-6gt8m                         2/2     Running   0          16m
kube-system   weave-net-88nnc                         2/2     Running   0          16m
kube-system   weave-net-shckr                         2/2     Running   0          16m
kube-system   weave-net-xr46t                         2/2     Running   0          16m

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
~~~

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to get an idea:

~~~
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#     'kube_network_plugin=calico'
#     'kube_network_plugin=flannel'
#     'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from the above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.
~~~

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI specific `--extra-vars` combos.
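A hypothetical SPEC file following that format (file name and values are illustrative only):

~~~
# my_spec.env: 2 distros x 2 extras -> 4 combinatory runs,
# written to my_spec-00.out .. my_spec-03.out
DISTROS=(debian centos)
EXTRAS=(
    'kube_network_plugin=calico'
    'kube_network_plugin=weave'
)
~~~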
9  contrib/dind/dind-cluster.yaml (new file)

@@ -0,0 +1,9 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - { role: dind-host }

- hosts: containers
  roles:
    - { role: dind-cluster }
2  contrib/dind/group_vars/all/all.yaml (new file)

@@ -0,0 +1,2 @@
# See distro.yaml for supported node_distro images
node_distro: debian
40  contrib/dind/group_vars/all/distro.yaml (new file)

@@ -0,0 +1,40 @@
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"
    user: "debian"
    pid1_exe: /lib/systemd/systemd
    init: |
      sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
    raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@*
    ssh_service: ssh
    extra_packages: []
  ubuntu:
    <<: *DEBIAN
    image: "ubuntu:16.04"
    user: "ubuntu"
    init: |
      /sbin/init
  centos: &CENTOS
    image: "centos:7"
    user: "centos"
    pid1_exe: /usr/lib/systemd/systemd
    init: |
      /sbin/init
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@* serial-getty@*
    ssh_service: sshd
    extra_packages: []
  fedora:
    <<: *CENTOS
    image: "fedora:latest"
    user: "fedora"
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
    extra_packages:
      - hostname
      - procps
      - findutils
      - kmod
      - iputils
15  contrib/dind/hosts (new file)

@@ -0,0 +1,15 @@
[local]
# If you created a virtualenv for ansible, you may need to specify running the
# python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local

[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5

[containers:vars]
ansible_connection=docker
22  contrib/dind/kubespray-dind.yaml (new file)

@@ -0,0 +1,22 @@
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
kubeadm_enabled: true

kubelet_fail_swap_on: false

# Docker nodes need to have been created with same "node_distro: debian"
# at contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian

docker_version: latest

docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker

dns_mode: coredns

deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0
1  contrib/dind/requirements.txt (new file)

@@ -0,0 +1 @@
docker
70  contrib/dind/roles/dind-cluster/tasks/main.yaml  Normal file
@@ -0,0 +1,70 @@
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
    distro_extra_packages: "{{ distro_setup['extra_packages'] }}"

- name: Null-ify some linux tools to ease DIND
  file:
    src: "/bin/true"
    dest: "{{ item }}"
    state: link
    force: yes
  with_items:
    # The DIND box may have swap enabled, don't bother
    - /sbin/swapoff
    # /etc/hosts handling would fail on trying to copy file attributes on edit,
    # void it by successfully returning nil output
    - /usr/bin/lsattr
    # disable selinux-isms, especially needed if running on a non-SELinux host
    - /usr/sbin/semodule

- name: Void installing dpkg docs and man pages on Debian based distros
  copy:
    content: |
      # Delete locales
      path-exclude=/usr/share/locale/*
      # Delete man pages
      path-exclude=/usr/share/man/*
      # Delete docs
      path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
  when:
    - ansible_os_family == 'Debian'

- name: Install system packages to better match a full-fledged node
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"

- name: Start needed services
  service:
    name: "{{ item }}"
    state: started
  with_items:
    - rsyslog
    - "{{ distro_ssh_service }}"

- name: Create distro user "{{ distro_user }}"
  user:
    name: "{{ distro_user }}"
    uid: 1000
    #groups: sudo
    append: yes

- name: Allow password-less sudo to "{{ distro_user }}"
  copy:
    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
    dest: "/etc/sudoers.d/{{ distro_user }}"

- name: Add my pubkey to "{{ distro_user }}" user authorized keys
  authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
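After this role runs, the node containers accept password-less SSH as the distro user, with sudo rights; a quick check (a sketch, assuming the debian distro and the default container names):

```
# The role installed ~/.ssh/id_rsa.pub for the distro user, so plain SSH works:
ssh debian@$(docker inspect -f '{{.NetworkSettings.IPAddress}}' kube-node1) sudo hostname
```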
86  contrib/dind/roles/dind-host/tasks/main.yaml  Normal file
@@ -0,0 +1,86 @@
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
    distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
    distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
    distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
    hostname: "{{ item }}"
    command: "{{ distro_init }}"
    #recreate: yes
    privileged: true
    tmpfs:
      - /sys/module/nf_conntrack/parameters
    volumes:
      - /boot:/boot
      - /lib/modules:/lib/modules
      - "{{ item }}:/dind/docker"
  register: containers
  with_items: "{{ groups.containers }}"
  tags:
    - addresses

- name: Gather list of containers IPs
  set_fact:
    addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
  tags:
    - addresses

- name: Create inventory_builder helper already set with the list of node containers' IPs
  template:
    src: inventory_builder.sh.j2
    dest: /tmp/kubespray.dind.inventory_builder.sh
    mode: 0755
  tags:
    - addresses

- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
  raw: |
    # agetty processes churn a lot of cpu time failing on nonexistent ttys, early STOP them, to reap them in the task below
    pkill -STOP agetty || true
    {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
    {{ distro_raw_setup }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("SKIPPED") < 0

- name: Remove gettys from node containers
  raw: |
    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
    systemctl disable {{ distro_agetty_svc }}
    systemctl stop {{ distro_agetty_svc }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
    cmp /etc/machine-id /etc/machine-id~ || true
    systemctl daemon-reload
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("removed") >= 0
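The machine-id rewrite matters because some CNIs (notably weave) seed MAC addresses from it; uniqueness is easy to verify from the host (a minimal sketch, assuming the default `kube-node*` names):

```
# Every node container should report a distinct machine-id;
# no output from uniq -d means no duplicates
for n in kube-node1 kube-node2 kube-node3 kube-node4 kube-node5; do
    docker exec $n cat /etc/machine-id
done | sort | uniq -d
```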
@@ -0,0 +1,3 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit the ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
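The rendered helper lands at `/tmp/kubespray.dind.inventory_builder.sh` with the containers' IPs baked in; pointing `CONFIG_FILE` at the target inventory generates it (this is exactly how the test driver below uses it):

```
# Build a kubespray inventory for the DIND node containers
mkdir -p inventory/local-dind
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
```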
93  contrib/dind/run-test-distros.sh  Executable file
@@ -0,0 +1,93 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.

info() {
    local msg="$*"
    local date="$(date -Isec)"
    echo "INFO: [$date] $msg"
}
pass_or_fail() {
    local rc="$?"
    local msg="$*"
    local date="$(date -Isec)"
    [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
    return $rc
}
test_distro() {
    local distro=${1:?}; shift
    local extra="${*:-}"
    local prefix="${distro}[${extra}]"
    ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
    pass_or_fail "$prefix: dind-nodes" || return 1
    (cd ../..
     INVENTORY_DIR=inventory/local-dind
     mkdir -p ${INVENTORY_DIR}
     rm -f ${INVENTORY_DIR}/hosts.ini
     CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
     # expand $extra with -e in front of each word
     extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
     ansible-playbook --become -e ansible_ssh_user=$distro -i \
         ${INVENTORY_DIR}/hosts.ini cluster.yml \
         -e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
     pass_or_fail "$prefix: kubespray"
    ) || return 1
    local node0=${NODES[0]}
    docker exec ${node0} kubectl get pod --all-namespaces
    pass_or_fail "$prefix: kube-api" || return 1
    let retries=60
    while ((retries--)); do
        # Some CNIs may set the NodePort on the "main" node interface address (thus no localhost NodePort),
        # e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
        docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
        sleep 2
    done
    [ $retries -ge 0 ]
    pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}

# The SPEC file(s) must define two arrays, e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#   'kube_network_plugin=calico'
#   'kube_network_plugin=flannel'
#   'kube_network_plugin=weave'
# )
# that will be tested in a combinatory way (e.g. from the above there will
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.

SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
    unset DISTROS EXTRAS
    echo "Loading file=${spec} ..."
    . ${spec} || continue
    : ${DISTROS:?} || continue
    echo "DISTROS=${DISTROS[@]}"
    echo "EXTRAS->"
    printf " %s\n" "${EXTRAS[@]}"
    let n=1
    for distro in ${DISTROS[@]}; do
        for extra in "${EXTRAS[@]:-NULL}"; do
            # Magic value to let this loop run once:
            [[ ${extra} == NULL ]] && unset extra
            docker rm -f ${NODES[@]}
            printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
            {
                info "${distro}[${extra}] START: file_out=${file_out}"
                time test_distro ${distro} ${extra}
            } |& tee ${file_out}
            # sleeping for the sake of the human to verify if they want
            sleep 2m
        done
    done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
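With the spec files below in place, a whole distro/CNI matrix runs from a single invocation (a sketch, assuming docker, ansible and the generated inventory_builder helper are all available on the host):

```
# Runs (debian ubuntu centos fedora) x (calico weave) = 8 deploys,
# logging each run to ./out/<spec>-NN.out
./run-test-distros.sh test-most_distros-some_CNIs.env
```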
11  contrib/dind/test-most_distros-some_CNIs.env  Normal file
@@ -0,0 +1,11 @@
# Test spec file: used from ./run-test-distros.sh, will run
# each distro in $DISTROS overloading the main kubespray ansible-playbook run
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
# DISTROS="${*:-$(egrep -o '^ \w+' group_vars/all/distro.yaml|paste -s)}"
DISTROS=(debian ubuntu centos fedora)

# Each line below will be added as --extra-vars to the main playbook run
EXTRAS=(
  'kube_network_plugin=calico'
  'kube_network_plugin=weave'
)
8  contrib/dind/test-some_distros-kube_router_combo.env  Normal file
@@ -0,0 +1,8 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
)
8  contrib/dind/test-some_distros-most_CNIs.env  Normal file
@@ -0,0 +1,8 @@
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico {"kubeadm_enabled":true}'
  'kube_network_plugin=canal {"kubeadm_enabled":true}'
  'kube_network_plugin=cilium {"kubeadm_enabled":true}'
  'kube_network_plugin=flannel {"kubeadm_enabled":true}'
  'kube_network_plugin=weave {"kubeadm_enabled":true}'
)
@@ -20,7 +20,7 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr

-Requires: ansible >= 2.4.0
+Requires: ansible >= 2.5.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
 Requires: python-pbr
@@ -43,7 +43,7 @@ ssh -F ./ssh-bastion.conf user@$ip

 Example (this one assumes you are using CoreOS)
 ```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -b --become-user=root --flush-cache
 ```
 ***Using other distrib than CoreOs***
 If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.

@@ -111,9 +111,9 @@ the `AWS CLI` with the following command:
 aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
 ```

-***Ansible Inventory doesnt get created:***
+***Ansible Inventory doesn't get created:***

-It could happen that Terraform doesnt create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
+It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts` in the directory `inventory` and paste the inventory into the file.

 **Architecture**
@@ -2,8 +2,9 @@
 ${connection_strings_master}
 ${connection_strings_node}
 ${connection_strings_etcd}
-${public_ip_address_bastion}

+[bastion]
+${public_ip_address_bastion}

 [kube-master]
@@ -24,4 +25,4 @@ kube-master


 [k8s-cluster:vars]
-${elb_api_fqdn}
+${elb_api_fqdn}
@@ -8,6 +8,23 @@ Openstack.
 This will install a Kubernetes cluster on an Openstack Cloud. It should work on
 most modern installs of OpenStack that support the basic services.

+### Known compatible public clouds
+- [Auro](https://auro.io/)
+- [BetaCloud](https://www.betacloud.io/)
+- [CityCloud](https://www.citycloud.com/)
+- [DreamHost](https://www.dreamhost.com/cloud/computing/)
+- [ELASTX](https://elastx.se/)
+- [EnterCloudSuite](https://www.entercloudsuite.com/)
+- [FugaCloud](https://fuga.cloud/)
+- [OVH](https://www.ovh.com/)
+- [Rackspace](https://www.rackspace.com/)
+- [Ultimum](https://ultimum.io/)
+- [VexxHost](https://vexxhost.com/)
+- [Zetta](https://www.zetta.io/)
+
+### Known incompatible public clouds
+- T-Systems / Open Telekom Cloud: requires `wait_until_associated`
+
 ## Approach
 The terraform configuration inspects variables found in
 [variables.tf](variables.tf) to create resources in your OpenStack cluster.
@@ -225,6 +242,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
 |`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
+|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |

 #### Terraform state files
@@ -341,11 +359,6 @@ If it fails try to connect manually via SSH. It could be something as simple as
 ### Configure cluster variables

 Edit `inventory/$CLUSTER/group_vars/all.yml`:
-- Set variable **bootstrap_os** appropriately for your desired image:
-```
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: coreos
-```
 - **bin_dir**:
 ```
 # Directory where the binaries will be installed
@@ -424,14 +437,6 @@ $ kubectl config use-context default-system
 kubectl version
 ```

-If you are using floating ip addresses then you may get this error:
-```
-Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
-```
-
-You can tell kubectl to ignore this condition by adding the
-`--insecure-skip-tls-verify` option.
-
 ## GlusterFS
 GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
 [GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
@@ -6,6 +6,7 @@ module "network" {
   subnet_cidr = "${var.subnet_cidr}"
   cluster_name = "${var.cluster_name}"
   dns_nameservers = "${var.dns_nameservers}"
+  use_neutron = "${var.use_neutron}"
 }

 module "ips" {
@@ -53,6 +54,7 @@ module "compute" {
   bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
   supplementary_master_groups = "${var.supplementary_master_groups}"
   supplementary_node_groups = "${var.supplementary_node_groups}"
+  worker_allowed_ports = "${var.worker_allowed_ports}"

   network_id = "${module.network.router_id}"
 }
@@ -52,12 +52,13 @@ resource "openstack_networking_secgroup_v2" "worker" {
 }

 resource "openstack_networking_secgroup_rule_v2" "worker" {
+  count = "${length(var.worker_allowed_ports)}"
   direction = "ingress"
   ethertype = "IPv4"
-  protocol = "tcp"
-  port_range_min = "30000"
-  port_range_max = "32767"
-  remote_ip_prefix = "0.0.0.0/0"
+  protocol = "${lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")}"
+  port_range_min = "${lookup(var.worker_allowed_ports[count.index], "port_range_min")}"
+  port_range_max = "${lookup(var.worker_allowed_ports[count.index], "port_range_max")}"
+  remote_ip_prefix = "${lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")}"
   security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
 }
@@ -46,7 +46,9 @@ variable "network_name" {}

 variable "flavor_bastion" {}

-variable "network_id" {}
+variable "network_id" {
+  default = ""
+}

 variable "k8s_master_fips" {
   type = "list"
@@ -71,3 +73,7 @@ variable "supplementary_master_groups" {
 variable "supplementary_node_groups" {
   default = ""
 }
+
+variable "worker_allowed_ports" {
+  type = "list"
+}
@@ -12,4 +12,6 @@ variable "external_net" {}

 variable "network_name" {}

-variable "router_id" {}
+variable "router_id" {
+  default = ""
+}
@@ -1,16 +1,19 @@
 resource "openstack_networking_router_v2" "k8s" {
   name = "${var.cluster_name}-router"
+  count = "${var.use_neutron}"
   admin_state_up = "true"
   external_network_id = "${var.external_net}"
 }

 resource "openstack_networking_network_v2" "k8s" {
   name = "${var.network_name}"
+  count = "${var.use_neutron}"
   admin_state_up = "true"
 }

 resource "openstack_networking_subnet_v2" "k8s" {
   name = "${var.cluster_name}-internal-network"
+  count = "${var.use_neutron}"
   network_id = "${openstack_networking_network_v2.k8s.id}"
   cidr = "${var.subnet_cidr}"
   ip_version = 4
@@ -18,6 +21,7 @@ resource "openstack_networking_subnet_v2" "k8s" {
 }

 resource "openstack_networking_router_interface_v2" "k8s" {
+  count = "${var.use_neutron}"
   router_id = "${openstack_networking_router_v2.k8s.id}"
   subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
 }
@@ -1,11 +1,12 @@
 output "router_id" {
-  value = "${openstack_networking_router_v2.k8s.id}"
+  value = "${element(concat(openstack_networking_router_v2.k8s.*.id, list("")), 0)}"
 }

 output "router_internal_port_id" {
-  value = "${openstack_networking_router_interface_v2.k8s.id}"
+  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, list("")), 0)}"
+
 }

 output "subnet_id" {
-  value = "${openstack_networking_subnet_v2.k8s.id}"
+  value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, list("")), 0)}"
 }
@@ -9,3 +9,5 @@ variable "dns_nameservers" {
 }

 variable "subnet_cidr" {}
+
+variable "use_neutron" {}
@@ -103,6 +103,11 @@ variable "network_name" {
   default = "internal"
 }

+variable "use_neutron" {
+  description = "Use neutron"
+  default = 1
+}
+
 variable "subnet_cidr" {
   description = "Subnet CIDR block."
   type = "string"
@@ -139,3 +144,15 @@ variable "bastion_allowed_remote_ips" {
   type = "list"
   default = ["0.0.0.0/0"]
 }
+
+variable "worker_allowed_ports" {
+  type = "list"
+  default = [
+    {
+      "protocol" = "tcp"
+      "port_range_min" = 30000
+      "port_range_max" = 32767
+      "remote_ip_prefix" = "0.0.0.0/0"
+    }
+  ]
+}
@@ -328,6 +328,7 @@ def openstack_host(resource, module_name):
     attrs = {
         'access_ip_v4': raw_attrs['access_ip_v4'],
         'access_ip_v6': raw_attrs['access_ip_v6'],
+        'access_ip': raw_attrs['access_ip_v4'],
         'ip': raw_attrs['network.0.fixed_ip_v4'],
         'flavor': parse_dict(raw_attrs, 'flavor',
                              sep='_'),
@@ -685,6 +686,7 @@ def iter_host_ips(hosts, ips):
         ip = ips[host_id]
         host[1].update({
             'access_ip_v4': ip,
+            'access_ip': ip,
             'public_ipv4': ip,
             'ansible_ssh_host': ip,
         })
31  contrib/vault/groups_vars/vault.yaml  Normal file
@@ -0,0 +1,31 @@
vault_deployment_type: docker
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
vault_version: 0.10.1
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
vault_downloads:
  vault:
    enabled: "{{ cert_management == 'vault' }}"
    container: "{{ vault_deployment_type != 'host' }}"
    file: "{{ vault_deployment_type == 'host' }}"
    dest: "{{ local_release_dir }}/vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
    mode: "0755"
    owner: "vault"
    repo: "{{ vault_image_repo }}"
    sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
    tag: "{{ vault_image_tag }}"
    unarchive: true
    url: "{{ vault_download_url }}"
    version: "{{ vault_version }}"
    groups:
      - vault

# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
kube_vault_mount_path: "/kube"
etcd_vault_mount_path: "/etcd"
1  contrib/vault/requirements.txt  Normal file
@@ -0,0 +1 @@
ansible-modules-hashivault>=3.9.4
@@ -23,6 +23,9 @@ vault_version: 0.10.1
 vault_binary_checksum: 66f0f1b0b221d664dd5913f8697409d7401df4bb2a19c7277e8fbad152063fae
 vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"

+# hvac==0.7.0 is broken at the moment
+hvac_version: 0.6.4
+
 # Arch of Docker images and needed packages
 image_arch: "{{ host_architecture }}"
@@ -12,7 +12,7 @@
   headers: "{{ vault_client_headers }}"
   status_code: "{{ vault_successful_http_codes | join(',') }}"
   register: vault_health_check
-  until: vault_health_check|succeeded
+  until: vault_health_check is succeeded
   retries: 10
   delay: "{{ retry_stagger | random + 3 }}"
   run_once: yes
@@ -20,7 +20,7 @@
   url: "http://localhost:{{ vault_port }}/"
   secret_shares: 1
   secret_threshold: 1
-  until: "vault_temp_init|succeeded"
+  until: "vault_temp_init is succeeded"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   register: vault_temp_init
@@ -12,8 +12,9 @@

 - name: install hvac
   pip:
-    name: "hvac"
+    name: "hvac=={{ hvac_version }}"
     state: "present"
+    extra_args: "{{ pip_extra_args | default(omit) }}"

 ## Bootstrap
 - include_tasks: bootstrap/main.yml
@@ -4,7 +4,7 @@
   shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
   failed_when: false
   register: vault_temp_stop
-  changed_when: vault_temp_stop|succeeded
+  changed_when: vault_temp_stop is succeeded

 # Check if vault is reachable on the localhost
 - name: check_vault | Attempt to pull local https Vault health
@@ -7,7 +7,7 @@
   method: HEAD
   status_code: 200,429,501,503
   register: vault_leader_check
-  until: "vault_leader_check|succeeded"
+  until: "vault_leader_check is succeeded"
   retries: 10

 - name: find_leader | Set fact for current http leader
@@ -52,6 +52,7 @@
   pip:
     name: "hvac"
     state: "present"
+    extra_args: "{{ pip_extra_args | default(omit) }}"

 - name: gen_certs_vault | Pull vault CA
   get_url:
@@ -1,3 +1,6 @@
+# /!\ The vault role has been retired from the main playbook.
+# This role probably requires a LOT of changes in order to work again
+
 Hashicorp Vault Role
 ====================

@@ -8,7 +11,7 @@ The Vault role is a two-step process:

 1. Bootstrap

 You cannot start your certificate management service securely with SSL (and
 the datastore behind it) without having the certificates in-hand already. This
 presents an unfortunate chicken and egg scenario, with one requiring the other.
 To solve for this, the Bootstrap step was added.

@@ -80,7 +83,7 @@ Additional Notes:

 - ``groups.vault|first`` is considered the source of truth for Vault variables
 - ``vault_leader_url`` is used as a pointer for the currently running Vault
 - Each service should have its own role and credentials. Currently those
   credentials are saved to ``/etc/vault/roles/<role>/``. The service will
   need to read in those credentials if they want to interact with Vault.
@@ -72,11 +72,11 @@ Mandatory variables that are common for at least one role (or a node group) can
 `inventory/sample/group_vars/k8s-cluster.yml`.
 There are also role vars for docker, rkt, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
-those cannot be overriden from the group vars. In order to override, one should use
+those cannot be overridden from the group vars. In order to override, one should use
 the `-e ` runtime flags (most simple way) or other layers described in the docs.

 Kubespray uses only a few layers to override things (or expect them to
-be overriden for roles):
+be overridden for roles):

 Layer | Comment
 ------|--------
@@ -3,7 +3,7 @@ AWS

 To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider.

-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

 You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
@@ -79,6 +79,13 @@ you'll need to edit the inventory and add a hostvar `local_as` by node.
 node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
 ```

+##### Optional : Defining BGP peers
+
+Peers can be defined using the `peers` variable (see docs/calico_peer_example examples).
+In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global".
+In order to define peers on a per node basis, the `peers` variable must be defined in hostvars.
+NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers)
+
 ##### Optional : Define global AS number

 Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).
@@ -19,7 +19,7 @@ on. Had it belonged to the new [operators world](https://coreos.com/blog/introdu
 it may have been named a "Kubernetes cluster operator". Kubespray however,
 does generic configuration management tasks from the "OS operators" ansible
 world, plus some initial K8s clustering (with networking plugins included) and
-control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
+control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-sigs/kubespray/issues/553)
 to adopt kubeadm as a tool in order to consume life cycle management domain
 knowledge from it and offload generic OS configuration things from it, which
 hopefully benefits both sides.
@@ -6,7 +6,6 @@ Example with Ansible:

 Before running the cluster playbook you must satisfy the following requirements:

 General CoreOS Pre-Installation Notes:
-- You should set the bootstrap_os variable to `coreos`
 - Ensure that the bin_dir is set to `/opt/bin`
 - ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
 - The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
@@ -47,6 +47,7 @@ In case your servers don't have access to internet (for example when deploying o

 * At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
   NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
 * `pyrepo_index` (and optionally `pyrepo_cert`)
 * Depending on the `container_manager`
   * When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is the system package manager)
+  * When `container_manager=crio`, `crio_rhel_repo_base_url`
@@ -6,10 +6,10 @@ Building your own inventory

 Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
 an example inventory located
-[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
+[here](https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/hosts.ini).

 You can use an
-[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
+[inventory generator](https://github.com/kubernetes-sigs/kubespray/blob/master/contrib/inventory_builder/inventory.py)
 to create or modify an Ansible inventory. Currently, it is limited in
 functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
 support creating inventory file for large clusters as well. It now supports
@@ -41,7 +41,7 @@ Adding nodes
 You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.

 - Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+- Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`:

     ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
       --private-key=~/.ssh/private_key
@@ -10,33 +10,7 @@ achieve the same goal.

 Etcd
 ----

-In order to use an external loadbalancing (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overriden in group_vars
-* `etcd_access_addresses`
-* `etcd_client_url`
-* `etcd_cert_alt_names`
-* `etcd_cert_alt_ips`
-
-### Example of a VIP w/ FQDN
-```yaml
-etcd_access_addresses: https://etcd.example.com:2379
-etcd_client_url: https://etcd.example.com:2379
-etcd_cert_alt_names:
-  - "etcd.kube-system.svc.{{ dns_domain }}"
-  - "etcd.kube-system.svc"
-  - "etcd.kube-system"
-  - "etcd"
-  - "etcd.example.com" # This one needs to be added to the default etcd_cert_alt_names
-```
-
-### Example of a VIP w/o FQDN (IP only)
-
-```yaml
-etcd_access_addresses: https://2.3.7.9:2379
-etcd_client_url: https://2.3.7.9:2379
-etcd_cert_alt_ips:
-  - "2.3.7.9"
-```
 The etcd clients (kube-api-masters) are configured with the list of all etcd peers. If the etcd-cluster has multiple instances, it's configured in HA already.

 Kube-apiserver
 --------------
@@ -120,12 +94,12 @@ for it.
 be covered by Kubespray for that case. Make sure your external LB provides it.
 Alternatively you may specify an externally load balanced VIPs in the
 `supplementary_addresses_in_ssl_keys` list. Then, kubespray will add them into
-the generated cluster certifactes as well.
+the generated cluster certificates as well.

 Aside of that specific case, the `loadbalancer_apiserver` considered mutually
 exclusive to `loadbalancer_apiserver_localhost`.

-Access API endpoints are evaluated automagically, as the following:
+Access API endpoints are evaluated automatically, as the following:

 | Endpoint type | kube-master | non-master | external |
 |------------------------------|----------------|---------------------|---------------------|
@@ -157,3 +131,33 @@ contacted via the local bind IP, which is `https://bip:sp`.
 Kubespray, the masters' APIs are accessed via the insecure endpoint, which
 consists of the local `kube_apiserver_insecure_bind_address` and
 `kube_apiserver_insecure_port`.
+
+Optional configurations
+------------------------
+### ETCD with a LB
+In order to use an external loadbalancing (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overridden in group_vars
+* `etcd_access_addresses`
+* `etcd_client_url`
+* `etcd_cert_alt_names`
+* `etcd_cert_alt_ips`
+
+#### Example of a VIP w/ FQDN
+```yaml
+etcd_access_addresses: https://etcd.example.com:2379
+etcd_client_url: https://etcd.example.com:2379
+etcd_cert_alt_names:
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
+  - "etcd"
+  - "etcd.example.com" # This one needs to be added to the default etcd_cert_alt_names
+```
+
+#### Example of a VIP w/o FQDN (IP only)
+
+```yaml
+etcd_access_addresses: https://2.3.7.9:2379
+etcd_client_url: https://2.3.7.9:2379
+etcd_cert_alt_ips:
+  - "2.3.7.9"
+```
@@ -1,12 +1,12 @@
-# Kubespray (kargo) in own ansible playbooks repo
+# Kubespray (kubespray) in own ansible playbooks repo

-1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
+1. Fork [kubespray repo](https://github.com/kubernetes-sigs/kubespray) to your personal/organisation account on github.
    Note:
    * All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
    * List of all forked repos could be retrieved from github page of original project.

 2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
    ```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
    Git will create _.gitmodules_ file in your existent ansible repo:
    ```
    [submodule "3d/kubespray"]
@@ -14,22 +14,22 @@
    url = https://github.com/YOUR_GITHUB/kubespray.git
    ```

 3. Configure git to show submodule status:
    ```git config --global status.submoduleSummary true```

-4. Add *original* kubespray repo as upstream:
-   ```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
+4. Add *original* kubespray repo as upstream:
+   ```git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```

 5. Sync your master branch with upstream:
    ```
    git checkout master
    git fetch upstream
    git merge upstream/master
    git push origin master
    ```

 6. Create a new branch which you will use in your working environment:
    ```git checkout -b work```
    ***Never*** use the master branch of your repository for your commits.

 7. Modify the path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have the same names as the kubespray project):
@@ -43,42 +43,39 @@
 8. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existent project.
    You could rename the *all.yml* config to something else, i.e. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all hosts groups related to the kubernetes setup.

 9. Modify your ansible inventory file by adding a mapping of your existent groups (if any) to kubespray naming.
    For example:
    ```
    ...
    #Kargo groups:
    [kube-node:children]
    kubenode

    [k8s-cluster:children]
    kubernetes

    [etcd:children]
    kubemaster
    kubemaster-ha

    [kube-master:children]
    kubemaster
    kubemaster-ha

+   [vault:children]
+   kube-master
+
    [kubespray:children]
    kubernetes
    ```
    * The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.

-10. Now you can include kargo tasks in you existent playbooks by including cluster.yml file:
+10. Now you can include kubespray tasks in your existent playbooks by including the cluster.yml file:
    ```
-   - name: Include kargo tasks
+   - name: Include kubespray tasks
      include: 3d/kubespray/cluster.yml
    ```
    Or you could copy separate tasks from cluster.yml into your ansible repository.

 11. Commit changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
    When you update your "work" branch you need to commit changes to the ansible repo as well.
    Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the actual code from the submodule.

 # Contributing
@@ -88,8 +85,8 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f

 1. Change working directory to the git submodule directory (3d/kubespray).

 2. Setup desired user.name and user.email for the submodule.
    If kubespray is the only submodule in your repo you could use something like:
    ```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-addres@used.for.cncf"'```

 3. Sync with upstream master:
@@ -98,24 +95,24 @@ If kubespray is only one submodule in your repo you could use something like:
    git merge upstream/master
    git push origin master
    ```
 4. Create a new branch for the specific fixes that you want to contribute:
    ```git checkout -b fixes-name-date-index```
    The branch name should be self-explanatory to you; adding a date and/or index will help you to track/delete your old PRs.

 5. Find the git hash of your commit in the "work" repo and apply it to the newly created "fix" repo:
    ```
    git cherry-pick <COMMIT_HASH>
    ```
 6. If you have several temporary-stage commits, squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
    Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.

 7. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
    Check that you're on the correct branch:
    ```git status```
    And pull changes from upstream (if any):
    ```git pull --rebase upstream master```

 8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.

 9. Open your forked repo in a browser; on the main page you will see a proposition to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you could safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
    If everything is fine, add a description about your changes (what they do and why they're needed) and confirm pull request creation.
108  docs/kube-router.md  Normal file
@@ -0,0 +1,108 @@
Kube-router
===========

Kube-router is an L3 CNI provider; as such it sets up IPv4 routing between
nodes to provide Pods' networks reachability.

See the [kube-router documentation](https://www.kube-router.io/).

## Verifying the kube-router install

Kube-router runs its pods as a `DaemonSet` in the `kube-system` namespace:

* Check the status of kube-router pods

```
# From the CLI
kubectl get pod --namespace=kube-system -l k8s-app=kube-router -owide

# output
NAME                READY     STATUS    RESTARTS   AGE   IP               NODE                   NOMINATED NODE
kube-router-4f679   1/1       Running   0          2d    192.168.186.4    mykube-k8s-node-nf-2   <none>
kube-router-5slf8   1/1       Running   0          2d    192.168.186.11   mykube-k8s-node-nf-3   <none>
kube-router-lb6k2   1/1       Running   0          20h   192.168.186.14   mykube-k8s-node-nf-6   <none>
kube-router-rzvrb   1/1       Running   0          20h   192.168.186.17   mykube-k8s-node-nf-4   <none>
kube-router-v6n56   1/1       Running   0          2d    192.168.186.6    mykube-k8s-node-nf-1   <none>
kube-router-wwhg8   1/1       Running   0          20h   192.168.186.16   mykube-k8s-node-nf-5   <none>
kube-router-x2xs7   1/1       Running   0          2d    192.168.186.10   mykube-k8s-master-1    <none>
```

* Peek at kube-router container logs:

```
# From the CLI
kubectl logs --namespace=kube-system -l k8s-app=kube-router | grep Peer.Up

# output
time="2018-09-17T16:47:14Z" level=info msg="Peer Up" Key=192.168.186.6 State=BGP_FSM_OPENCONFIRM Topic=Peer
time="2018-09-17T16:47:16Z" level=info msg="Peer Up" Key=192.168.186.11 State=BGP_FSM_OPENCONFIRM Topic=Peer
time="2018-09-17T16:47:46Z" level=info msg="Peer Up" Key=192.168.186.10 State=BGP_FSM_OPENCONFIRM Topic=Peer
time="2018-09-18T19:12:24Z" level=info msg="Peer Up" Key=192.168.186.14 State=BGP_FSM_OPENCONFIRM Topic=Peer
time="2018-09-18T19:12:28Z" level=info msg="Peer Up" Key=192.168.186.17 State=BGP_FSM_OPENCONFIRM Topic=Peer
time="2018-09-18T19:12:38Z" level=info msg="Peer Up" Key=192.168.186.16 State=BGP_FSM_OPENCONFIRM Topic=Peer
[...]
```

## Gathering kube-router state

Kube-router Pods come bundled with a "Pod Toolbox" which provides very
useful internal state views for:

* IPVS: via `ipvsadm`
* BGP peering and routing info: via `gobgp`

You need to `kubectl exec -it ...` into a kube-router container to use these, see
<https://www.kube-router.io/docs/pod-toolbox/> for details.

## Kube-router configuration

You can change the default configuration by overriding `kube_router_...` variables
(as found at `roles/network_plugin/kube-router/defaults/main.yml`);
these are named to follow the `kube-router` command-line options, as per
<https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.
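For instance, a couple of such overrides passed straight on the command line (a minimal sketch; the variable names follow the defaults file mentioned above, the inventory path and values are illustrative):

```
# Override kube-router defaults for one run of the cluster playbook
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
    -e kube_network_plugin=kube-router \
    -e kube_router_run_service_proxy=false
```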
## Caveats

### kubeadm_enabled: true

If you want to set `kube-router` to replace `kube-proxy`
(`--run-service-proxy=true`) while using `kubeadm_enabled`,
then the `kube-proxy` DaemonSet will be removed *after* kubeadm finishes
running, as it's not possible to skip the kube-proxy install in kubeadm flags
and/or config, see https://github.com/kubernetes/kubeadm/issues/776.

Given the above, if `--run-service-proxy=true` is needed it would be
better to avoid `kubeadm_enabled`, i.e. set:

```
kubeadm_enabled: false
kube_router_run_service_proxy: true
```

If for some reason you do want/need to set `kubeadm_enabled`, removing
kube-proxy afterwards behaves better if kube-proxy is set to ipvs mode, i.e. set:

```
kubeadm_enabled: true
kube_router_run_service_proxy: true
kube_proxy_mode: ipvs
```

## Advanced BGP Capabilities
https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities

If you have other networking devices or SDN systems that talk BGP, kube-router will fit in perfectly.
From a simple full node-to-node mesh to per-node peering configurations, most routing needs can be attained.
The configuration is Kubernetes native (annotations) just like the rest of kube-router.

For more details please refer to https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md.

The following options will set up annotations for kube-router, using the `kubectl annotate` command.

```
kube_router_annotations_master: []
kube_router_annotations_node: []
kube_router_annotations_all: []
```
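As a concrete illustration, the equivalent manual `kubectl annotate` calls look like this (a hypothetical sketch: the node name, peer IP and ASN are made up; the annotation keys come from the upstream kube-router BGP documentation linked above):

```
# Tell kube-router to BGP-peer this node with an external router (illustrative values)
kubectl annotate node mykube-k8s-node-nf-1 "kube-router.io/peer.ips=192.168.186.254"
kubectl annotate node mykube-k8s-node-nf-1 "kube-router.io/peer.asns=64512"
```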
73  docs/multus.md  Normal file
@@ -0,0 +1,73 @@
Multus
===========

Multus is a meta CNI plugin that provides multiple network interface support to
pods. For each interface, Multus delegates CNI calls to secondary CNI plugins
such as Calico, macvlan, etc.

See the [multus documentation](https://github.com/intel/multus-cni).

## Multus installation

Since Multus itself does not implement networking, it requires a master plugin, which is specified through the variable `kube_network_plugin`. To enable Multus, an additional variable `kube_network_plugin_multus` must be set to `true`. For example,
```
kube_network_plugin: calico
kube_network_plugin_multus: true
```
will install Multus and Calico and configure Multus to use Calico as the primary network plugin.

## Using Multus

Once Multus is installed, you can create CNI configurations (as CRD objects) for additional networks; in this case a macvlan CNI configuration is defined. You may replace the config field with any valid CNI configuration where the CNI binary is available on the nodes.

```
cat <<EOF | kubectl create -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-conf
spec:
  config: '{
    "cniVersion": "0.3.0",
    "type": "macvlan",
    "master": "eth0",
    "mode": "bridge",
    "ipam": {
      "type": "host-local",
      "subnet": "192.168.1.0/24",
      "rangeStart": "192.168.1.200",
      "rangeEnd": "192.168.1.216",
      "routes": [
        { "dst": "0.0.0.0/0" }
      ],
      "gateway": "192.168.1.1"
    }
  }'
EOF
```

You may then create a pod with an additional interface that connects to this network using annotations. The annotation correlates to the name in the NetworkAttachmentDefinition above.

```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: samplepod
  annotations:
    k8s.v1.cni.cncf.io/networks: macvlan-conf
spec:
  containers:
  - name: samplepod
    command: ["/bin/bash", "-c", "sleep 2000000000000"]
    image: dougbtv/centos-network
EOF
```

You may now inspect the pod and see that there is an additional interface configured:

```
$ kubectl exec -it samplepod -- ip a
```

For more details on how to use Multus, please visit https://github.com/intel/multus-cni
Some files were not shown because too many files have changed in this diff.