mirror of https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 22:04:43 +03:00

Compare commits: release-2. ... v2.11.1 (199 commits)
Commit SHA1s in this range:

b0ccda8a42 c8dad3f6c6 5ec9ab7ec0 73097aa39d 86cc703c75 4dba34bd02 b0437516c1 da015e0249
554857da97 9bf23fa43b 42287066d3 a1ff1de975 1bfbc5bbc4 c5b4d3ceaa 8fc9c5d025 42bba66c02
53bc80bb59 771ce96e6d fc456ff0cd b4f70db878 5707f79b33 0a2f4edfc6 56fa46716e b74abe56fd
62aecd1e4a 973afef96e a235605d2c 023108a733 75d1be8272 a44235d11b 7abf6a6958 0d0b1fdf82
b710c72f04 678c316d01 bc6de32faf 7cf8ad4dc7 02ec72fa40 d22634a597 4132cee687 f3df0d5f4a
1d285e654d dc6ad64ec7 92bfcf0467 54b1fe83f3 5337cff179 1be788f785 8afbf339f7 8c935dfb50
66c5ed8406 4087e97505 da50ed0936 fbbfff3795 fb9103acd3 49d921cf91 fe29c97ae8 2abb6c8689
a3ca441998 9cf503acb1 1cbdd7ed5c 428e52e0d1 70dc222719 69f796f0c7 5826f0810c de9443a694
99c5f7e013 d9dedc2cd5 23ae6027ab 781b5691c9 fd9bbcb157 e0410661fa 8ef754678a 161a8f55fa
7481cc31e1 b15b6e834f 76640cf1a1 374ea7b81d 46bef931e9 a36e9ae690 728155a2a1 cdf9a9f4fc
29307740dd a038d62644 20c7e31ea3 65065e7fdf 352297cf8d a67a50f9c0 324bc41097 c81b443d93
dc16ab92f4 53032a6695 d90a5f291b 3b7791501e f2b8a3614d e89b47c7ee 2aa66eb12d 4c8b93e5b9
216631bf02 c7f3123e28 f599c2a691 bc7d1f36ea 80fa294a31 465dfd68bc 73f45fbe94 d270678bda
de028814e5 b5406b752d 6025981ceb 4348e78b24 e2f9adc2ff f67a24499b 5c704552d8 d83ea51101
fa6027e8f0 2849191e67 0559eec681 a3a7fe7c8e 9b2d176617 7a3547e4d1 e6fb686156 5e80603bbb
c8d95a1586 27a99e0a3f 3cc351dff9 23c9071c30 14141ec137 5bec2edaf7 f504d0ea99 3b7797b1a1
aa63eb6196 23aa3e4638 56ae3bfec2 4d5c4a13cb 69a8f91512 fa791cc344 456f743470 ab6f0012cc
4afbf51d32 d62684b617 a8dfcbbfc7 bbdc6210f5 c7f6ed1495 818aa7aeb1 045acc724b d540560619
797bfd85b0 07cb8ebef7 54416cabfd 3617ae31f6 4f05d801c3 956afcb33f 6347419233 0c7a50fe1e
7423932510 b41530ba5d 29e916508c b45f3f0004 2a5721b4d4 e30a703c8e 333f1a4a40 84b278021a
1e470b0473 0ef3a7914c a3fff1e438 4bc204925a 5d9946184a 5ba169a612 872b37f751 8485136f9a
ff1bc739f1 594a0e7f1b 8e28ba38d2 73c2ff17dd 13f225e6ae 3f62492a15 5e3bd2dff1 787a9c74fa
14749df6f3 2db2898112 3776000fc4 f0572e59e7 6217184c7f 044dcbaed0 8a5eae94ea bf3c6aeed1
f3fbf995ca 03bded2b6b d5c0829d61 00369303de 1f1479c0a7 e67f848abc 560f50d3cd 3f45122d0d
50bdaa573c 24b6698cc9 73885d3b9e f29387316f d6fd0d2aca e814da1eec e029a09345
.ansible-lint

@@ -3,14 +3,23 @@ parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
  # The following rules throw errors.
  # These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
  - '204'
  - '206'
  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
  - '301'
  - '305'
  - '306'
  - '404'
  - '502'
  - '503'
  - '504'

  # These rules are intentionally skipped:
  #
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'
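The skip list only applies when ansible-lint picks up this file, which it does automatically when invoked from the repository root. A minimal local run, assuming ansible-lint is installed:

```
# .ansible-lint is auto-discovered, so the configured rules are skipped
ansible-lint -v cluster.yml

# Rules can also be skipped ad hoc, e.g. to test whether one of the
# listed rules could be re-enabled
ansible-lint -x 301,305,306 cluster.yml
```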
.gitlab-ci.yml

@@ -1,8 +1,8 @@
---
stages:
  - unit-tests
  - moderator
  - deploy-part1
  - moderator
  - deploy-part2
  - deploy-gce
  - deploy-special
@@ -37,7 +37,7 @@ before_script:
  tags:
    - packet
  variables:
    KUBESPRAY_VERSION: v2.9.0
    KUBESPRAY_VERSION: v2.10.0
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION

.testcases: &testcases
@@ -60,11 +60,12 @@ ci-authorized:
  script:
    - /bin/sh scripts/premoderator.sh
  except: ['triggers', 'master']
  # Disable ci moderator
  only: []

include:
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/gce.yml
  - .gitlab-ci/digital-ocean.yml
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
.gitlab-ci/gce.yml

@@ -20,6 +20,8 @@
  <<: *gce_variables
  tags:
    - gce
  except: ['triggers']
  only: [/^pr-.*$/]

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
@@ -36,8 +38,6 @@ gce_ubuntu18-flannel-aio:
  stage: deploy-part1
  <<: *gce
  when: manual
  except: ['triggers']
  only: [/^pr-.*$/]

### PR JOBS PART2

@@ -45,15 +45,11 @@ gce_coreos-calico-aio:
  stage: deploy-gce
  <<: *gce
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

gce_centos7-flannel-addons:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: [/^pr-.*$/]

### MANUAL JOBS

@@ -64,36 +60,42 @@ gce_centos-weave-kubeadm-sep:
  <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep:
  stage: deploy-gce
  <<: *gce
  when: manual
  only: ['triggers']
  except: []

gce_coreos-calico-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-canal-ha-triggers:
  stage: deploy-special
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-flannel-addons-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

# More builds for PRs/merges (manual) and triggers (auto)

@@ -102,27 +104,23 @@ gce_ubuntu-canal-ha:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-canal-kubeadm:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-canal-kubeadm-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-flannel-ha:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']

gce_centos-weave-kubeadm-triggers:
  stage: deploy-gce
@@ -131,99 +129,87 @@ gce_centos-weave-kubeadm-triggers:
  <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-contiv-sep:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_coreos-cilium:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_rhel7-weave:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_rhel7-weave-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_debian9-calico-upgrade:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_debian9-calico-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_coreos-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_coreos-canal-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_rhel7-canal-sep:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_rhel7-canal-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-calico-ha:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_centos7-calico-ha-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_centos7-multus-calico:
  stage: deploy-gce
@@ -231,6 +217,11 @@ gce_centos7-multus-calico:
  variables:
    <<: *centos7_multus_calico_variables
  when: manual

gce_oracle-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

@@ -238,27 +229,19 @@ gce_opensuse-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_coreos-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_ubuntu-kube-router-sep:
  stage: deploy-special
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
.gitlab-ci/lint.yml

@@ -6,6 +6,15 @@ yamllint:
    - yamllint --strict .
  except: ['triggers', 'master']

vagrant-validate:
  extends: .job
  stage: unit-tests
  script:
    - curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
    - dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
    - vagrant validate --ignore-provider
  except: ['triggers', 'master']

ansible-lint:
  extends: .job
  stage: unit-tests
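A sketch of reproducing these unit-test jobs outside GitLab, on a Debian-based machine with the same pinned tool versions:

```
# yamllint job
yamllint --strict .

# vagrant-validate job: install the pinned Vagrant 2.2.4 and validate the
# Vagrantfile without a provider present
curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
sudo dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
vagrant validate --ignore-provider
```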
.gitlab-ci/packet.yml

@@ -9,6 +9,8 @@
  <<: *packet_variables
  tags:
    - packet
  only: [/^pr-.*$/]
  except: ['triggers']

.test-upgrade: &test-upgrade
  variables:
@@ -18,8 +20,6 @@ packet_ubuntu18-calico-aio:
  stage: deploy-part1
  <<: *packet
  when: on_success
  except: ['triggers']
  only: ['master', /^pr-.*$/]

# ### PR JOBS PART2

@@ -27,8 +27,6 @@ packet_centos7-flannel-addons:
  stage: deploy-part2
  <<: *packet
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

# ### MANUAL JOBS

@@ -37,12 +35,14 @@ packet_centos-weave-kubeadm-sep:
  <<: *packet
  when: on_success
  only: ['triggers']
  except: []

packet_ubuntu-weave-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
  only: ['triggers']
  except: []

# # More builds for PRs/merges (manual) and triggers (auto)

@@ -50,74 +50,73 @@ packet_ubuntu-canal-ha:
  stage: deploy-special
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_ubuntu-canal-kubeadm:
  stage: deploy-part2
  <<: *packet
  when: on_success
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *packet
  when: on_success
  except: ['triggers']
  when: manual

packet_ubuntu-contiv-sep:
  stage: deploy-special
  stage: deploy-part2
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
  when: on_success

packet_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_ubuntu18-flannel-containerd:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_debian9-macvlan-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_debian9-calico-upgrade:
  stage: deploy-part2
  <<: *packet
  when: on_success
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_centos7-calico-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_centos7-kube-ovn:
  stage: deploy-part2
  <<: *packet
  when: on_success
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_centos7-kube-router:
  stage: deploy-special
  stage: deploy-part2
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
  when: on_success

packet_centos7-multus-calico:
  stage: deploy-part2
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_opensuse-canal:
  stage: deploy-part2
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

packet_ubuntu-kube-router-sep:
  stage: deploy-special
packet_oracle-7-canal:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-kube-router-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
.gitlab-ci/terraform.yml

@@ -4,49 +4,51 @@
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
    - ./tests/scripts/terraform_install.sh
    # Set Ansible config
    - cp ansible.cfg ~/.ansible.cfg
    # Install Terraform
    - apt-get install -y unzip
    - curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
    - unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
    # Prepare inventory
    - cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
    - cd inventory/$CLUSTER
    - ln -s ../../contrib/terraform/$PROVIDER/hosts
    - terraform init ../../contrib/terraform/$PROVIDER
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
  only: ['master', /^pr-.*$/]

.terraform_validate:
  extends: .terraform_install
  stage: unit-tests
  only: ['master', /^pr-.*$/]
  script:
    - terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
    - terraform fmt -check -diff ../../contrib/terraform/$PROVIDER
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
  stage: deploy-part2
  when: manual
  only: [/^pr-.*$/]
  variables:
    ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
    ANSIBLE_INVENTORY: hosts
    CI_PLATFORM: tf
    TF_VAR_ssh_user: $SSH_USER
    TF_VAR_cluster_name: $CI_JOB_ID
  script:
    - terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
    - ansible-playbook -i hosts ../../cluster.yml --become
    - tests/scripts/testcases_run.sh
  after_script:
    # Cleanup regardless of exit code
    - cd inventory/$CLUSTER
    - terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER
    - ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.11.11
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

@@ -70,7 +72,6 @@ tf-packet-ubuntu16-default:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
@@ -85,7 +86,6 @@ tf-packet-ubuntu18-default:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
@@ -105,15 +105,16 @@ tf-packet-ubuntu18-default:
    OS_INTERFACE: public
    OS_IDENTITY_API_VERSION: "3"

tf-apply-ovh:
tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.11.11
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    TF_VAR_cluster_name: $CI_COMMIT_REF_SLUG
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -131,3 +132,31 @@ tf-apply-ovh:
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

tf-ovh_coreos-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: core
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
    TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
    TF_VAR_image: "CoreOS Stable"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
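Run locally, the validate job reduces to a provider-parameterised script; a sketch mirroring the CI lines above (the exported values are illustrative):

```
export PROVIDER=openstack TF_VERSION=0.12.6

# Only the openstack provider ships cluster.tfvars; the others use cluster.tf
if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .

terraform init contrib/terraform/$PROVIDER
terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
terraform fmt -check -diff contrib/terraform/$PROVIDER
```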
Dockerfile

@@ -4,8 +4,8 @@ RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python-dev sshpass apt-transport-https jq \
    ca-certificates curl gnupg2 software-properties-common python-pip
    libssl-dev python3-dev sshpass apt-transport-https jq \
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
@@ -13,6 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl \
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
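A usage sketch for the resulting image; the tag and bind mount are illustrative, not defined by the repo:

```
# Build the deploy image with the python3 toolchain and kubectl v1.14.4
docker build -t kubespray:local .

# Run a deployment from inside the container, mounting your inventory
docker run --rm -it -v "$(pwd)/inventory:/kubespray/inventory" kubespray:local \
  ansible-playbook -i inventory/mycluster/hosts.yml --become cluster.yml
```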
OWNERS_ALIASES

@@ -18,3 +18,4 @@ aliases:
    - chapsuk
    - mirwan
    - miouge1
    - holmsten
34 README.md

@@ -36,9 +36,9 @@ To deploy the cluster you can use :
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `-b` is required, as for example writing SSL keys in /etc/,
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without -b the playbook will fail to run!
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml

Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
@@ -101,6 +101,7 @@ Supported Linux Distributions
- **Fedora** 28
- **Fedora/CentOS** Atomic
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7

Note: Upstart/SysV init based OS types are not supported.

@@ -108,32 +109,33 @@ Supported Components
--------------------

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.1
  - [etcd](https://github.com/coreos/etcd) v3.2.26
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
  - [etcd](https://github.com/coreos/etcd) v3.3.10
  - [docker](https://www.docker.com/) v18.06 (see note)
  - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin
  - [calico](https://github.com/projectcalico/calico) v3.4.0
  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
  - [calico](https://github.com/projectcalico/calico) v3.7.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.3.0
  - [cilium](https://github.com/cilium/cilium) v1.5.5
  - [contiv](https://github.com/contiv/install) v1.2.1
  - [flanneld](https://github.com/coreos/flannel) v0.11.0
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
  - [multus](https://github.com/intel/multus-cni) v3.1.autoconf
  - [weave](https://github.com/weaveworks/weave) v2.5.1
  - [multus](https://github.com/intel/multus-cni) v3.2.1
  - [weave](https://github.com/weaveworks/weave) v2.5.2
- Application
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
  - [coredns](https://github.com/coredns/coredns) v1.5.0
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0
  - [coredns](https://github.com/coredns/coredns) v1.6.0
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1

Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pin.

Requirements
------------

- **Ansible v2.7.8 (or newer) and python-netaddr is installed on the machine
- **Minimum required version of Kubernetes is v1.14**
- **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
  that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -156,7 +158,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
Network Plugins
---------------

You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

@@ -170,13 +172,17 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
  apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.

- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
  iptables for network policies, and BGP for pods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

The choice is defined with the variable `kube_network_plugin`. There is also an
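Since the plugin is selected via `kube_network_plugin`, switching plugins is an inventory edit or a one-off override; a sketch using the same inventory layout as the deploy example above:

```
# Persistent: set the plugin in the cluster group vars
grep kube_network_plugin inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

# One-off override on the command line
ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root \
  cluster.yml -e kube_network_plugin=weave
```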
15 Vagrantfile (vendored)

@@ -21,10 +21,11 @@ SUPPORTED_OS = {
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
}

# Defaults for config options defined in CONFIG
@@ -180,9 +181,17 @@ Vagrant.configure("2") do |config|
  "flannel_interface": "eth1",
  "kube_network_plugin": $network_plugin,
  "kube_network_plugin_multus": $multi_networking,
  "docker_keepcache": "1",
  "download_run_once": "False",
  "download_run_once": "True",
  "download_localhost": "False",
  "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
  # Make kubespray cache even when download_run_once is false
  "download_force_cache": "True",
  # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
  "download_keep_remote_cache": "False",
  "docker_keepcache": "1",
  # These two settings will put kubectl and admin.config in $inventory/artifacts
  "kubeconfig_localhost": "True",
  "kubectl_localhost": "True",
  "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
  "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
  "ansible_ssh_user": SUPPORTED_OS[$os][:user]
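A sketch of exercising the new caching defaults locally; the `vagrant/config.rb` override file is an assumption based on the CONFIG comment above:

```
# Pick one of the SUPPORTED_OS keys, then bring the cluster up
echo '$os = "centos-bento"' >> vagrant/config.rb
vagrant up
# With download_run_once/download_force_cache enabled, artifacts land in
# $HOME/kubespray_cache on the host
```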
ansible.cfg

@@ -4,6 +4,8 @@ ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore

host_key_checking=False
gathering = smart
51 cluster.yml

@@ -19,53 +19,50 @@
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: k8s-cluster:etcd:calico-rr
- hosts: k8s-cluster:etcd
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  roles:
    - { role: kubespray-defaults}
    - { role: bootstrap-os, tags: bootstrap-os}

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  vars:
    ansible_ssh_pipelining: true
  gather_facts: false
  pre_tasks:
    - name: gather facts from all instances
      setup:
      delegate_to: "{{item}}"
      delegate_facts: true
      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
      run_once: true

- hosts: k8s-cluster:etcd:calico-rr
- hosts: k8s-cluster:etcd
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
    - { role: download, tags: download, when: "not skip_downloads" }
  environment: "{{proxy_env}}"
  environment: "{{ proxy_env }}"

- hosts: etcd
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster:calico-rr
- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/node, tags: node }
  environment: "{{proxy_env}}"
  environment: "{{ proxy_env }}"

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -82,6 +79,12 @@
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: network_plugin, tags: network }

- hosts: calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}

- hosts: kube-master[0]
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
@@ -98,18 +101,12 @@
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

- hosts: calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: network_plugin/calico/rr, tags: network }

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes-apps, tags: apps }
  environment: "{{proxy_env}}"
  environment: "{{ proxy_env }}"

- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
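Because the plays are tagged, pieces of the reworked flow can be run in isolation; a sketch:

```
# Re-run only the etcd plays; they are skipped automatically when
# etcd_kubeadm_enabled is true, per the when: guards above
ansible-playbook -i inventory/mycluster/hosts.yml --become cluster.yml --tags etcd

# Only the network plugin and calico route-reflector plays
ansible-playbook -i inventory/mycluster/hosts.yml --become cluster.yml --tags network,calico_rr
```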
contrib/aws_inventory/kubespray-aws-inventory.py

@@ -42,8 +42,11 @@ class SearchEC2Tags(object):
        region = os.environ['REGION']

        ec2 = boto3.resource('ec2', region)

        instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
        filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
        cluster_name = os.getenv('CLUSTER_NAME')
        if cluster_name:
          filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
        instances = ec2.instances.filter(Filters=filters)
        for instance in instances:

        ##Suppose default vpc_visibility is private
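With the added filter, scoping the dynamic inventory to one cluster becomes an environment-variable concern; a sketch (the script path is an assumption; only REGION, CLUSTER_NAME and the tag key come from the code above):

```
export REGION=eu-west-1
# Only instances tagged kubernetes.io/cluster/prod-cluster are returned
export CLUSTER_NAME=prod-cluster
python3 contrib/aws_inventory/kubespray-aws-inventory.py
```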
@@ -4,8 +4,11 @@
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

- set_fact:
- name: Set vm_list
  set_fact:
    vm_list: "{{ vm_list_cmd.stdout }}"

- name: Generate inventory
  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
@@ -8,9 +8,22 @@
  command: az vm list -o json --resource-group {{ azure_resource_group }}
  register: vm_list_cmd

- set_fact:
- name: Query Azure Load Balancer Public IP
  command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
  register: lb_pubip_cmd

- name: Set VM IP, roles lists and load balancer public IP
  set_fact:
    vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
    vm_roles_list: "{{ vm_list_cmd.stdout }}"
    lb_pubip: "{{ lb_pubip_cmd.stdout }}"

- name: Generate inventory
  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"

- name: Generate Load Balancer variables
  template:
    src: loadbalancer_vars.j2
    dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
@@ -0,0 +1,8 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
  address: {{ lb_pubip.ipAddress }}
  port: 6443

## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false
@@ -29,7 +29,7 @@ sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
imageReference:
  publisher: "OpenLogic"
  offer: "CentOS"
  sku: "7.2"
  sku: "7.5"
  version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"
@@ -1,10 +1,18 @@
---
- set_fact:
    base_dir: "{{playbook_dir}}/.generated/"
- name: Set base_dir
  set_fact:
    base_dir: "{{ playbook_dir }}/.generated/"

- file: path={{base_dir}} state=directory recurse=true
- name: Create base_dir
  file:
    path: "{{ base_dir }}"
    state: directory
    recurse: true

- template: src={{item}} dest="{{base_dir}}/{{item}}"
- name: Store json files in base_dir
  template:
    src: "{{ item }}"
    dest: "{{ base_dir }}/{{ item }}"
  with_items:
    - network.json
    - storage.json
@@ -12,7 +12,7 @@
- name: Null-ify some linux tools to ease DIND
  file:
    src: "/bin/true"
    dest: "{{item}}"
    dest: "{{ item }}"
    state: link
    force: yes
  with_items:
@@ -52,7 +52,7 @@
    - rsyslog
    - "{{ distro_ssh_service }}"

- name: Create distro user "{{distro_user}}"
- name: Create distro user "{{ distro_user }}"
  user:
    name: "{{ distro_user }}"
    uid: 1000
@@ -28,7 +28,7 @@
      - /lib/modules:/lib/modules
      - "{{ item }}:/dind/docker"
  register: containers
  with_items: "{{groups.containers}}"
  with_items: "{{ groups.containers }}"
  tags:
    - addresses
contrib/inventory_builder/inventory.py

@@ -59,6 +59,7 @@ def get_var_as_bool(name, default):


CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
@@ -96,9 +97,10 @@ class KubesprayInventory(object):
        etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
        self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
        if len(self.hosts) >= SCALE_THRESHOLD:
            self.set_kube_master(list(self.hosts.keys())[etcd_hosts_count:5])
            self.set_kube_master(list(self.hosts.keys())[
                etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
        else:
            self.set_kube_master(list(self.hosts.keys())[:2])
            self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
        self.set_kube_node(self.hosts.keys())
        if len(self.hosts) >= SCALE_THRESHOLD:
            self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
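The new `KUBE_MASTERS_MASTERS` knob replaces the hardcoded master counts; a usage sketch with illustrative IPs:

```
# Build hosts.yaml with 3 masters instead of the default 2
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5 10.10.1.6)
CONFIG_FILE=inventory/mycluster/hosts.yaml KUBE_MASTERS_MASTERS=3 \
  python3 contrib/inventory_builder/inventory.py "${IPS[@]}"
```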
@@ -2,9 +2,11 @@
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.

## Install
```
Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml and making any adjustments as required.

ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
@@ -1,6 +1,12 @@
---
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  # additional_address_pools:
  #   kube_service_pool:
  #     ip_range: "10.5.1.50-10.5.1.99"
  #     protocol: "layer2"
  #     auto_assign: false
  limits:
    cpu: "100m"
    memory: "100Mi"
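Overriding these defaults follows the group_vars approach from the README above; a sketch with illustrative pool values:

```
mkdir -p inventory/mycluster/group_vars/k8s-cluster
cat >> inventory/mycluster/group_vars/k8s-cluster/addons.yml <<'EOF'
metallb:
  ip_range: "192.168.1.240-192.168.1.250"
  protocol: "layer2"
EOF
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```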
@@ -9,7 +9,7 @@
- name: "Kubernetes Apps | Install and configure MetalLB"
  kube:
    name: "MetalLB"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  become: true
@@ -8,6 +8,14 @@ data:
  config: |
    address-pools:
    - name: loadbalanced
      protocol: layer2
      protocol: {{ metallb.protocol }}
      addresses:
      - {{ metallb.ip_range }}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
    - name: {{ pool }}
      protocol: {{ metallb.additional_address_pools[pool].protocol }}
      addresses:
      - {{ metallb.additional_address_pools[pool].ip_range }}
      auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}
15 contrib/misc/clusteradmin-rbac.yml (new file)

@@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
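Applying the manifest grants the dashboard's service account full cluster-admin, so it is an explicit opt-in; a sketch:

```
kubectl apply -f contrib/misc/clusteradmin-rbac.yml
kubectl get clusterrolebinding kubernetes-dashboard -o wide
```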
@@ -1,6 +1,8 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -12,9 +14,9 @@
  kube:
    name: glusterfs
    namespace: default
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.dest}}"
    state: "{{item.changed | ternary('latest','present') }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -14,3 +14,5 @@ ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contr
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```

Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system
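Combined, the full tear-down including LVM package removal reads:

```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml \
  contrib/network-storage/heketi/heketi-tear-down.yml \
  --extra-vars "heketi_remove_lvm=true"
```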
@@ -4,6 +4,7 @@
  register: "initial_heketi_state"
  changed_when: false
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"

- name: "Bootstrap heketi."
  when:
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -16,15 +17,20 @@
  register: "initial_heketi_pod"
  command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
  changed_when: false

- name: "Ensure heketi bootstrap pod is up."
  assert:
    that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
- set_fact:

- name: Store the initial heketi pod name
  set_fact:
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"

- name: "Test heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

- name: "Load heketi topology."
  when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
  include_tasks: "bootstrap/topology.yml"
@@ -42,6 +48,7 @@
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"

# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
  include_tasks: "bootstrap/storage.yml"
@@ -6,7 +6,7 @@
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Wait for heketi bootstrap to complete."

@@ -6,7 +6,7 @@
- name: "Create heketi storage."
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
    state: "present"
  vars:
@@ -6,7 +6,7 @@
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Kubernetes Apps | Label GlusterFS nodes"
@@ -33,6 +33,6 @@
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-service-account.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,11 +1,19 @@
---
- register: "label_present"
- name: Get storage nodes
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false

- name: "Assign storage label"
  when: "label_present.stdout_lines|length == 0"
  command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
- register: "label_present"

- name: Get storage nodes again
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false
- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }

- name: Ensure the label has been set
  assert:
    that: "label_present|length > 0"
    msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
@@ -1,19 +1,24 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
  become: true
  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
  template:
    src: "heketi-deployment.json.j2"
    dest: "{{ kube_config_dir }}/heketi-deployment.json"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-deployment.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

- name: "Ensure heketi is up and running."
  changed_when: false
  register: "heketi_state"
  vars:
    heketi_state: { stdout: "{}" }
    heketi_state:
      stdout: "{}"
    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
  command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -22,5 +27,7 @@
    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5
- set_fact:

- name: Set the Heketi pod name
  set_fact:
    heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -7,7 +7,7 @@

- name: "Kubernetes Apps | Test Heketi"
  register: "heketi_service_state"
  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Bootstrap Heketi"
@@ -1,31 +1,44 @@
---
- register: "clusterrolebinding_state"
  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
- name: Get clusterrolebindings
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
  when: "clusterrolebinding_state.stdout == \"\""
  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
- register: "clusterrolebinding_state"
  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false
- assert:

- name: Make sure that clusterrolebindings are present now
  assert:
    that: "clusterrolebinding_state.stdout != \"\""
    msg: "Cluster role binding is not present."

- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
- name: Get the heketi-config-secret secret
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false

- name: "Render Heketi secret configuration."
  become: true
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"

- name: "Deploy Heketi config secret"
  when: "secret_state.stdout == \"\""
  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false
- assert:

- name: Make sure the heketi-config-secret secret exists now
  assert:
    that: "secret_state.stdout != \"\""
    msg: "Heketi config secret is not present."
@@ -7,6 +7,6 @@
- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

@@ -20,6 +20,6 @@
- name: "Kubernetes Apps | Install and configure Storace Class"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/storageclass.yml"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -56,7 +56,7 @@
"serviceAccountName": "heketi-service-account",
"containers": [
  {
    "image": "heketi/heketi:7",
    "image": "heketi/heketi:9",
    "imagePullPolicy": "Always",
    "name": "deploy-heketi",
    "env": [

@@ -68,7 +68,7 @@
"serviceAccountName": "heketi-service-account",
"containers": [
  {
    "image": "heketi/heketi:7",
    "image": "heketi/heketi:9",
    "imagePullPolicy": "Always",
    "name": "heketi",
    "env": [
@@ -0,0 +1,2 @@
|
||||
---
|
||||
heketi_remove_lvm: false
|
||||
@@ -14,6 +14,8 @@
   when: "ansible_os_family == 'Debian'"

 - name: "Get volume group information."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
@@ -21,12 +23,16 @@
   changed_when: false

 - name: "Remove volume groups."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
   become: true
   command: "vgremove {{ volume_group }} --yes"
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }

 - name: "Remove physical volume from cluster disks."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
   ignore_errors: true
@@ -36,11 +42,11 @@
   yum:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'RedHat'"
+  when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"

 - name: "Remove lvm utils (Debian)"
   become: true
   apt:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'Debian'"
+  when: "ansible_os_family == 'Debian' and heketi_remove_lvm"

contrib/terraform/openstack/.gitignore (vendored)
@@ -1,4 +1,5 @@
 .terraform
 *.tfvars
+!sample-inventory/cluster.tfvars
 *.tfstate
 *.tfstate.backup
@@ -16,14 +16,13 @@ most modern installs of OpenStack that support the basic services.
 - [ELASTX](https://elastx.se/)
 - [EnterCloudSuite](https://www.entercloudsuite.com/)
 - [FugaCloud](https://fuga.cloud/)
+- [Open Telekom Cloud](https://cloud.telekom.de/): requires setting the variable `wait_for_floatingip = "true"` in your cluster.tfvars
 - [OVH](https://www.ovh.com/)
 - [Rackspace](https://www.rackspace.com/)
 - [Ultimum](https://ultimum.io/)
 - [VexxHost](https://vexxhost.com/)
 - [Zetta](https://www.zetta.io/)

-### Known incompatible public clouds
-- T-Systems / Open Telekom Cloud: requires `wait_until_associated`

 ## Approach
 The terraform configuration inspects variables found in
@@ -70,7 +69,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.

 ## Requirements

-- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
+- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
 - [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
 - you already have a suitable OS image in Glance
 - you already have a floating IP pool created
@@ -220,7 +219,7 @@ set OS_PROJECT_DOMAIN_NAME=Default
 The construction of the cluster is driven by values found in
 [variables.tf](variables.tf).

-For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
+For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

 |Variable | Description |
 |---------|-------------|
@@ -246,6 +245,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
 |`k8s_allowed_remote_ips` | List of CIDRs allowed to initiate an SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
+|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default |
 #### Terraform state files

@@ -276,7 +276,7 @@ This should finish fairly quickly telling you Terraform has successfully initialized
 You can apply the Terraform configuration to your cluster with the following command
 issued from your cluster's inventory directory (`inventory/$CLUSTER`):
 ```ShellSession
-$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
+$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```

 If you chose to create a bastion host, this script will create
@@ -290,7 +290,7 @@ pick it up automatically.
 You can destroy your new cluster with the following command issued from the cluster's inventory directory:

 ```ShellSession
-$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
+$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```

 If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -325,6 +325,30 @@ $ ssh-add ~/.ssh/id_rsa

 If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
+#### Metadata variables
+
+The [python script](../terraform.py) that reads the
+generated `.tfstate` file to generate a dynamic inventory recognizes
+some variables within a "metadata" block, defined in a "resource"
+block (example):
+
+```
+resource "openstack_compute_instance_v2" "example" {
+    ...
+    metadata {
+        ssh_user = "ubuntu"
+        prefer_ipv6 = true
+        python_bin = "/usr/bin/python3"
+    }
+    ...
+}
+```
+
+As the example shows, these let you define the SSH username for
+Ansible, a Python binary which is needed by Ansible if
+`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
+instance should be preferred over IPv4.
+
 #### Bastion host

 Bastion access will be determined by:
@@ -391,6 +415,11 @@ kube_network_plugin: flannel
 # For Container Linux by CoreOS:
 resolvconf_mode: host_resolvconf
 ```
+- Set the maximum number of attached Cinder volumes per host (default 256)
+```
+node_volume_attach_limit: 26
+```

 ### Deploy Kubernetes
@@ -3,7 +3,7 @@ provider "openstack" {
 }

 module "network" {
-  source = "modules/network"
+  source = "./modules/network"

   external_net = "${var.external_net}"
   network_name = "${var.network_name}"
@@ -14,7 +14,7 @@ module "network" {
 }

 module "ips" {
-  source = "modules/ips"
+  source = "./modules/ips"

   number_of_k8s_masters = "${var.number_of_k8s_masters}"
   number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
@@ -27,7 +27,7 @@ module "ips" {
 }

 module "compute" {
-  source = "modules/compute"
+  source = "./modules/compute"

   cluster_name = "${var.cluster_name}"
   az_list = "${var.az_list}"
@@ -63,6 +63,7 @@ module "compute" {
   supplementary_master_groups = "${var.supplementary_master_groups}"
   supplementary_node_groups = "${var.supplementary_node_groups}"
   worker_allowed_ports = "${var.worker_allowed_ports}"
+  wait_for_floatingip = "${var.wait_for_floatingip}"

   network_id = "${module.network.router_id}"
 }
@@ -22,20 +22,20 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_master" {

 resource "openstack_networking_secgroup_v2" "bastion" {
   name = "${var.cluster_name}-bastion"
-  count = "${var.number_of_bastions ? 1 : 0}"
+  count = "${var.number_of_bastions != "" ? 1 : 0}"
   description = "${var.cluster_name} - Bastion Server"
   delete_default_rules = true
 }

 resource "openstack_networking_secgroup_rule_v2" "bastion" {
-  count = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
+  count = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
   direction = "ingress"
   ethertype = "IPv4"
   protocol = "tcp"
   port_range_min = "22"
   port_range_max = "22"
   remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
+  security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
 }

 resource "openstack_networking_secgroup_v2" "k8s" {
@@ -99,7 +99,7 @@ resource "openstack_compute_instance_v2" "bastion" {
 }

 security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-                   "${openstack_networking_secgroup_v2.bastion.name}",
+                   "${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
                   ]

 metadata = {
@@ -109,7 +109,7 @@ resource "openstack_compute_instance_v2" "bastion" {
 }

 provisioner "local-exec" {
-  command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
+  command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
 }
 }
@@ -136,7 +136,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 }

 provisioner "local-exec" {
-  command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
 }
 }

@@ -163,7 +163,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 }

 provisioner "local-exec" {
-  command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
 }
 }

@@ -257,7 +257,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
 }

 provisioner "local-exec" {
-  command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
 }
 }
@@ -288,12 +288,14 @@ resource "openstack_compute_floatingip_associate_v2" "bastion" {
   count = "${var.number_of_bastions}"
   floating_ip = "${var.bastion_fips[count.index]}"
   instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
+  wait_until_associated = "${var.wait_for_floatingip}"
 }

 resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
   count = "${var.number_of_k8s_masters}"
   instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
   floating_ip = "${var.k8s_master_fips[count.index]}"
+  wait_until_associated = "${var.wait_for_floatingip}"
 }

 resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
@@ -306,6 +308,7 @@ resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
   count = "${var.number_of_k8s_nodes}"
   floating_ip = "${var.k8s_node_fips[count.index]}"
   instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
+  wait_until_associated = "${var.wait_for_floatingip}"
 }

 resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
@@ -82,6 +82,8 @@ variable "k8s_allowed_egress_ips" {
   type = "list"
 }

+variable "wait_for_floatingip" {}
+
 variable "supplementary_master_groups" {
   default = ""
 }
@@ -1,5 +1,5 @@
 resource "null_resource" "dummy_dependency" {
-  triggers {
+  triggers = {
     dependency_id = "${var.router_id}"
   }
 }
@@ -1,15 +1,15 @@
 output "k8s_master_fips" {
-  value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
+  value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
 }

 output "k8s_master_no_etcd_fips" {
-  value = ["${openstack_networking_floatingip_v2.k8s_master_no_etcd.*.address}"]
+  value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
 }

 output "k8s_node_fips" {
-  value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
+  value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
 }

 output "bastion_fips" {
-  value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
+  value = "${openstack_networking_floatingip_v2.bastion[*].address}"
 }
@@ -14,7 +14,7 @@ resource "openstack_networking_network_v2" "k8s" {
 resource "openstack_networking_subnet_v2" "k8s" {
   name = "${var.cluster_name}-internal-network"
   count = "${var.use_neutron}"
-  network_id = "${openstack_networking_network_v2.k8s.id}"
+  network_id = "${openstack_networking_network_v2.k8s[count.index].id}"
   cidr = "${var.subnet_cidr}"
   ip_version = 4
   dns_nameservers = "${var.dns_nameservers}"
@@ -22,6 +22,6 @@ resource "openstack_networking_subnet_v2" "k8s" {

 resource "openstack_networking_router_interface_v2" "k8s" {
   count = "${var.use_neutron}"
-  router_id = "${openstack_networking_router_v2.k8s.id}"
-  subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
+  router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
+  subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
 }
@@ -125,6 +125,11 @@ variable "floatingip_pool" {
   default = "external"
 }

+variable "wait_for_floatingip" {
+  description = "Terraform will poll the instance until the floating IP has been associated."
+  default = "false"
+}
+
 variable "external_net" {
   description = "uuid of the external/public network"
 }
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # Copyright 2015 Cisco Systems, Inc.
 #
@@ -20,15 +20,15 @@
 Dynamic inventory for Terraform - finds all `.tfstate` files below the working
 directory and generates an inventory based on them.
 """
 from __future__ import unicode_literals, print_function
 import argparse
 from collections import defaultdict
+import random
 from functools import wraps
 import json
 import os
 import re

-VERSION = '0.3.0pre'
+VERSION = '0.4.0pre'


 def tfstates(root=None):
@@ -38,15 +38,58 @@ def tfstates(root=None):
         if os.path.splitext(name)[-1] == '.tfstate':
             yield os.path.join(dirpath, name)


 def convert_to_v3_structure(attributes, prefix=''):
     """Convert the attributes from v4 to v3.
     Receives a dict and returns a dictionary."""
     result = {}
     if isinstance(attributes, str):
         # In the case when we receive a string (e.g. values for security_groups)
         return {'{}{}'.format(prefix, random.randint(1, 10**10)): attributes}
     for key, value in attributes.items():
         if isinstance(value, list):
             if len(value):
                 result['{}{}.#'.format(prefix, key)] = len(value)
                 for i, v in enumerate(value):
                     result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
         elif isinstance(value, dict):
             result['{}{}.%'.format(prefix, key)] = len(value)
             for k, v in value.items():
                 result['{}{}.{}'.format(prefix, key, k)] = v
         else:
             result['{}{}'.format(prefix, key)] = value
     return result


 def iterresources(filenames):
     for filename in filenames:
         with open(filename, 'r') as json_file:
             state = json.load(json_file)
             tf_version = state['version']
             if tf_version == 3:
                 for module in state['modules']:
                     name = module['path'][-1]
                     for key, resource in module['resources'].items():
                         yield name, key, resource
             elif tf_version == 4:
                 # In version 4 the structure changes so we need to iterate
                 # each instance inside the resource branch.
                 for resource in state['resources']:
                     name = resource['module'].split('.')[-1]
                     for instance in resource['instances']:
                         key = "{}.{}".format(resource['type'], resource['name'])
                         if 'index_key' in instance:
                             key = "{}.{}".format(key, instance['index_key'])
                         data = {}
                         data['type'] = resource['type']
                         data['provider'] = resource['provider']
                         data['depends_on'] = instance.get('depends_on', [])
                         data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
                         if 'id' in instance['attributes']:
                             data['primary']['id'] = instance['attributes']['id']
                         data['primary']['meta'] = instance['attributes'].get('meta', {})
                         yield name, key, data
             else:
                 raise KeyError('tfstate version %d not supported' % tf_version)


 ## READ RESOURCES
 PARSERS = {}
@@ -109,7 +152,7 @@ def calculate_mantl_vars(func):


 def _parse_prefix(source, prefix, sep='.'):
-    for compkey, value in source.items():
+    for compkey, value in list(source.items()):
         try:
             curprefix, rest = compkey.split(sep, 1)
         except ValueError:
@@ -127,7 +170,7 @@ def parse_attr_list(source, prefix, sep='.'):
         idx, key = compkey.split(sep, 1)
         attrs[idx][key] = value

-    return attrs.values()
+    return list(attrs.values())


 def parse_dict(source, prefix, sep='.'):
@@ -239,6 +282,12 @@ def openstack_host(resource, module_name):
     attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']

     try:
         if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
             attrs.update({
                 'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
                 'publicly_routable': True,
             })
         else:
             attrs.update({
                 'ansible_ssh_host': raw_attrs['access_ip_v4'],
                 'publicly_routable': True,
@@ -252,9 +301,9 @@ def openstack_host(resource, module_name):
     if 'metadata.ssh_user' in raw_attrs:
         attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']

-    if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
+    if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
         device_index = 1
-        for key, value in raw_attrs.items():
+        for key, value in list(raw_attrs.items()):
             match = re.search("^volume.*.device$", key)
             if match:
                 attrs['disk_volume_device_' + str(device_index)] = value
@@ -272,7 +321,7 @@ def openstack_host(resource, module_name):
     groups.append('os_image=' + attrs['image']['name'])
     groups.append('os_flavor=' + attrs['flavor']['name'])
     groups.extend('os_metadata_%s=%s' % item
-                  for item in attrs['metadata'].items())
+                  for item in list(attrs['metadata'].items()))
     groups.append('os_region=' + attrs['region'])

     # groups specific to Mantl
@@ -11,7 +11,7 @@
   until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
   retries: 3
   delay: 2
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: true
   failed_when: false
   register: vault_etcd_health_check
@@ -32,7 +32,7 @@ The name of the resource group your instances are in, can be retrieved via `azur
 The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`

 #### azure\_subnet\_name
-The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`
+The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`

 #### azure\_security\_group\_name
 The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
@@ -40,14 +40,14 @@ The name of the network security group your instances are in, can be retrieved via
 #### azure\_aad\_client\_id + azure\_aad\_client\_secret
 These will have to be generated first:
 - Create an Azure AD Application with:
-  `azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
-  The name, identifier-uri, home-page and the password can be chosen
+  `azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
+  The display name, identifier-uri, homepage and the password can be chosen
   Note the AppId in the output.
 - Create a Service principal for the application with:
-  `azure ad sp create --applicationId AppId`
+  `azure ad sp create --id AppId`
   This is the AppId from the last command
 - Create the role assignment with:
-  `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
+  `azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`

 azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
@@ -119,13 +119,13 @@ recommended here:

 You need to edit your inventory and add:

-* `calico-rr` group with nodes in it. At the moment it's incompatible with
-  `kube-node` due to BGP port conflict with `calico-node` container. So you
-  should not have nodes in both `calico-rr` and `kube-node` groups.
+* `calico-rr` group with nodes in it. `calico-rr` can be combined with
+  `kube-node` and/or `kube-master`. `calico-rr` group also must be a child
+  group of `k8s-cluster` group.
 * `cluster_id` by route reflector node/group (see details
   [here](https://hub.docker.com/r/calico/routereflector/))

-Here's an example of Kubespray inventory with route reflectors:
+Here's an example of Kubespray inventory with standalone route reflectors:

 ```
 [all]
@@ -154,6 +154,7 @@ node5
 [k8s-cluster:children]
 kube-node
 kube-master
+calico-rr

 [calico-rr]
 rr0
@@ -114,10 +114,12 @@ The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers
 cluster service names.

 ## Nodelocal DNS cache
-Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns / core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
+Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).

 More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).

 **As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
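For reference, a minimal sketch of the relevant variables — the values mirror the sample `group_vars/k8s-cluster/k8s-cluster.yml` shown later in this changeset; adjust the IP only if the link-local default clashes with your environment:

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml
enable_nodelocaldns: true        # run the per-node DNS caching agent
nodelocaldns_ip: 169.254.25.10   # link-local IP the agent listens on
```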

 Limitations
 -----------
@@ -129,9 +131,7 @@ Limitations

 * There is
   [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
-  for the SkyDNS ``ndots`` param via an
-  [option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
-  add-on, while SkyDNS supports it though.
+  for the SkyDNS ``ndots`` param.

 * the ``searchdomains`` have a limitation of 6 names and 256 chars
   length. Due to default ``svc, default.svc`` subdomains, the actual
@@ -3,23 +3,22 @@ Downloading binaries and containers

 Kubespray supports several download/upload modes. The default is:

-* Each node downloads binaries and container images on its own, which is
-  ``download_run_once: False``.
+* Each node downloads binaries and container images on its own, which is ``download_run_once: False``.
 * For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
-* For system managed containers, like kubelet or etcd, pull policy is
-  ``download_always_pull: False``, which is pull if only the wanted repo and
-  tag/sha256 digest differs from that the host has.
+* For system managed containers, like kubelet or etcd, pull policy is ``download_always_pull: False``, which pulls only if the wanted repo and tag/sha256 digest differ from what the host has.

 There is also a "pull once, push many" mode as well:

-* Override the ``download_run_once: True`` to download container images only once
-  then push to cluster nodes in batches. The default delegate node
-  for pushing images is the first `kube-master`.
-* If your ansible runner node (aka the admin node) have password-less sudo and
-  docker enabled, you may want to define the ``download_localhost: True``, which
-  makes that node a delegate for pushing images while running the deployment with
-  ansible. This maybe the case if cluster nodes cannot access each over via ssh
-  or you want to use local docker images as a cache for multiple clusters.
+* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
+* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this, docker must be installed and running on the ansible master, and the current user must either be in the docker group or be able to do passwordless sudo, in order to access docker.

 NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.

 On caching:

 * When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is as much as is needed for the largest image, which is currently slightly less than 150MB.
 * By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the remote node to the local cache, or use that cache to pre-provision those nodes. To force the use of the cache, set `download_force_cache` to `True`.
 * By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting `download_keep_remote_cache` will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.
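Putting the caching knobs together, a minimal sketch of a "pull once, push many" configuration could look like this — all variable names are taken from the text above; the values are illustrative, not prescriptive:

```yaml
download_run_once: true                    # download/pull once, then push to the nodes
download_localhost: true                   # use the ansible runner as the delegate
download_cache_dir: /tmp/kubespray_cache   # local cache location (the default)
download_force_cache: true                 # reuse the local cache on later runs
download_keep_remote_cache: false          # delete pushed images from nodes after use
```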

 Container images and binary files are described by the vars like ``foo_version``,
 ``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
@@ -29,15 +28,14 @@ Container images may be defined by its repo and tag, for example:
 `andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest:
 `andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.

-Note, the sha256 digest and the image tag must be both specified and correspond
+Note, the SHA256 digest and the image tag must be both specified and correspond
 to each other. The given example above is represented by the following vars:
-```
+```yaml
 dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
 dnsmasq_image_repo: andyshinn/dnsmasq
 dnsmasq_image_tag: '2.72'
 ```
-The full list of available vars may be found in the download's ansible role defaults.
-Those also allow to specify custom urls and local repositories for binaries and container
+The full list of available vars may be found in the download's ansible role defaults. Those also allow specifying custom urls and local repositories for binaries and container
 images as well. See also the DNS stack docs for the related intranet configuration,
 so the hosts can resolve those urls and repos.
@@ -46,7 +44,7 @@ so the hosts can resolve those urls and repos.

 In case your servers don't have access to the internet (for example when deploying on premises with security constraints), you'll first have to set up the appropriate proxies/caches/mirrors and/or internal repositories and registries and then adapt the following variables to fit your environment before deploying (a sketch follows this list):

 * At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
-  NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
+  NOTE: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
 * `pyrepo_index` (and optionally `pyrepo_cert`)
 * Depending on the `container_manager`
   * When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is the system package manager)
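As a hedged sketch of such overrides — the `registry.internal` host and port are hypothetical; `kube_image_repo` and `docker_insecure_registries` are variables that appear elsewhere in this changeset:

```yaml
# hypothetical internal mirror; adapt host/port/path to your environment
kube_image_repo: "registry.internal:5000/google-containers"
docker_insecure_registries:
  - registry.internal:5000
```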
@@ -51,20 +51,27 @@ You may want to add worker, master or etcd nodes to your existing cluster. This

 Remove nodes
 ------------

-You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again.
+You may want to remove **master**, **worker**, or **etcd** nodes from your
+existing cluster. This can be done by re-running the `remove-node.yml`
+playbook. First, all specified nodes will be drained, then some kubernetes
+services are stopped and some certificates are deleted,
+and finally the kubectl command to delete these nodes is executed.
+This can be combined with the add node function. This is generally helpful
+when doing something like autoscaling your clusters. Of course, if a node
+is not working, you can remove the node and install it again.

 Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).

     ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
       --private-key=~/.ssh/private_key

-Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
+Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.
 ```
 ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
   --private-key=~/.ssh/private_key \
   --extra-vars "node=nodename,nodename2"
 ```

 If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
 to skip the node reset step. If one node is unavailable, but others you wish
 to remove are able to connect via SSH, you could set reset_nodes=no as a host
 var in inventory.
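A minimal sketch of that host var, assuming the YAML inventory format (`node2` is a hypothetical unreachable node):

```yaml
# inventory/mycluster/hosts.yml (fragment)
all:
  hosts:
    node2:
      reset_nodes: no   # skip the reset step only for this unreachable host
```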

 Connecting to Kubernetes
 ------------------------

docs/kube-ovn.md (new file, 48 lines)
@@ -0,0 +1,48 @@
Kube-OVN
===========
Kube-OVN integrates OVN-based network virtualization with Kubernetes. It offers an advanced Container Network Fabric for enterprises.

For more information please check the [Kube-OVN documentation](https://github.com/alauda/kube-ovn)

## How to use it

Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: kube-ovn
...
```

## Verifying the kube-ovn install

Kube-OVN runs ovn and its controller in the `kube-ovn` namespace

* Check the status of the kube-ovn pods

```
# From the CLI
kubectl get pod -n kube-ovn

# Output
NAME                                   READY   STATUS    RESTARTS   AGE
kube-ovn-cni-49lsm                     1/1     Running   0          2d20h
kube-ovn-cni-9db8f                     1/1     Running   0          2d20h
kube-ovn-cni-wftdk                     1/1     Running   0          2d20h
kube-ovn-controller-68d7bb48bd-7tnvg   1/1     Running   0          2d21h
ovn-central-6675dbb7d9-d7z8m           1/1     Running   0          4d16h
ovs-ovn-hqn8p                          1/1     Running   0          4d16h
ovs-ovn-hvpl8                          1/1     Running   0          4d16h
ovs-ovn-r5frh                          1/1     Running   0          4d16h
```

* Check the default and node subnets

```
# From the CLI
kubectl get subnet

# Output
NAME          PROTOCOL   CIDR            PRIVATE   NAT
join          IPv4       100.64.0.0/16   false     false
ovn-default   IPv4       10.16.0.0/16    false     true
```

docs/macvlan.md (new file, 48 lines)
@@ -0,0 +1,48 @@
Macvlan
===============

How to use it:
-------------

* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: macvlan
...
```

* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or per host in the `host.yml` file:
```
all:
  hosts:
    node1:
      ip: 10.2.2.1
      access_ip: 10.2.2.1
      ansible_host: 10.2.2.1
      macvlan_interface: ens5
```

Issues encountered:
-------------

- Service DNS

  reply from unexpected source:

  add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`

- Disable nodelocaldns

  The nodelocal dns IP is not reachable.

  Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
  ```
  enable_nodelocaldns: false
  ```
@@ -13,6 +13,7 @@ Kubespray's roadmap
 - [ ] GCE
 - [x] AWS (contrib/terraform/aws)
 - [x] Openstack (contrib/terraform/openstack)
+- [x] Packet
 - [ ] Digital Ocean
 - [ ] Azure
 - [ ] On AWS autoscaling, multi AZ
@@ -23,11 +24,11 @@ Kubespray's roadmap
   https://github.com/kubernetes/kubernetes/issues/18112)

 ### Tests
-- [ ] Run kubernetes e2e tests
+- [x] Run kubernetes e2e tests
 - [ ] Test idempotency on single OS but for all network plugins/container engines
 - [ ] single test on AWS per day
 - [ ] test scale up cluster: +1 etcd, +1 master, +1 node
-- [ ] Reorganize CI test vars into group var files
+- [x] Reorganize CI test vars into group var files

 ### Lifecycle
 - [ ] Upgrade granularity: select components to upgrade and skip others
@@ -42,23 +43,10 @@ Kubespray's roadmap
 - Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory

 ### Addons (helm or native ansible)
-Include optionals deployments to init the cluster:
-##### Monitoring
-- Heapster / Grafana ....
-- **Prometheus**
+- [x] Helm
+- [x] Ingress-nginx
+- [x] kubernetes-dashboard

-##### Others
-
-##### Dashboards:
-- kubernetes-dashboard
-- Fabric8
-- Tectonic
-- Cockpit
-
-##### Paas like
-- Openshift Origin
-- Openstack
-- Deis Workflow
-
 ### Others
 - Organize and update documentation (split in categories)

docs/vagrant.md
@@ -1,69 +1,129 @@
-Vagrant Install
-=================
+Introduction
+============

-Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
-with vmware, but is untested) you should be able to launch a 3 node
-Kubernetes cluster by simply running `$ vagrant up`.<br />
+Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (the vmware provider is untested), you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install kubernetes on them. Once they are completed you can connect to any of them by running `vagrant ssh k8s-[1..3]`.

-This will spin up 3 VMs and install kubernetes on them. Once they are
-completed you can connect to any of them by running <br />
-`$ vagrant ssh k8s-0[1..3]`.
+To give an estimate of the expected duration of a provisioning run: on a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time.

-```
-$ vagrant up
-Bringing machine 'k8s-01' up with 'virtualbox' provider...
-Bringing machine 'k8s-02' up with 'virtualbox' provider...
-Bringing machine 'k8s-03' up with 'virtualbox' provider...
-==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
-...
-...
-k8s-03: Running ansible-playbook...
-
-PLAY [k8s-cluster] *************************************************************
-
-TASK [setup] *******************************************************************
-ok: [k8s-03]
-ok: [k8s-01]
-ok: [k8s-02]
-...
-...
-PLAY RECAP *********************************************************************
-k8s-01   : ok=157  changed=66  unreachable=0  failed=0
-k8s-02   : ok=137  changed=59  unreachable=0  failed=0
-k8s-03   : ok=86   changed=51  unreachable=0  failed=0
-
-$ vagrant ssh k8s-01
-vagrant@k8s-01:~$ kubectl get nodes
-NAME     STATUS   AGE
-k8s-01   Ready    45s
-k8s-02   Ready    45s
-k8s-03   Ready    45s
-```
+For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tmpfs devices that are mounted are mostly empty, and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well.

 Customize Vagrant
 =================

-You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
-or through an override file.
-
-In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
-
-You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
-e.g.:
-
-    echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
-
-and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
+You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a `config.rb` file in it. An example of how to configure this file is given below.

 Use alternative OS for Vagrant
 ==============================

-By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
-operating system for your local cluster.
+By default, Vagrant uses an Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster.

 Customize the `$os` variable in `Vagrantfile` or as an override, e.g.:

     echo '$os = "coreos-stable"' >> vagrant/config.rb

 The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.

+File and image caching
+======================
+
+Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes, and as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see the [download documentation](docs/downloads.md).
+
+Example use of Vagrant
+======================
+
+The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repo and run it by executing `source <name_of_the_file>`.
+
+```
+# use virtualenv to install all python requirements
+VENVDIR=venv
+virtualenv --python=/usr/bin/python3.7 $VENVDIR
+source $VENVDIR/bin/activate
+pip install -r requirements.txt
+
+# prepare an inventory to test with
+INV=inventory/my_lab
+rm -rf ${INV}.bak &> /dev/null
+mv ${INV} ${INV}.bak &> /dev/null
+cp -a inventory/sample ${INV}
+rm -f ${INV}/hosts.ini
+
+# customize the vagrant environment
+mkdir vagrant
+cat << EOF > vagrant/config.rb
+\$instance_name_prefix = "kub"
+\$vm_cpus = 1
+\$num_instances = 3
+\$os = "centos-bento"
+\$subnet = "10.0.20"
+\$network_plugin = "flannel"
+\$inventory = "$INV"
+\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
+EOF
+
+# make the rpm cache
+mkdir -p temp/docker_rpms
+
+vagrant up
+
+# make a copy of the downloaded docker rpms, to speed up the next provisioning run
+scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/
+
+# copy the kubectl access configuration in place
+mkdir $HOME/.kube/ &> /dev/null
+ln -s $INV/artifacts/admin.conf $HOME/.kube/config
+# make the kubectl binary available
+sudo ln -s $INV/artifacts/kubectl /usr/local/bin/kubectl
+# or
+export PATH=$PATH:$INV/artifacts
+```
+
+If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible:
+```
+ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
+```
+
+If all went well, you can check whether it's all working as expected:
+```
+kubectl get nodes
+```
+The output should look like this:
+```
+$ kubectl get nodes
+NAME    STATUS   ROLES    AGE   VERSION
+kub-1   Ready    master   32m   v1.14.1
+kub-2   Ready    master   31m   v1.14.1
+kub-3   Ready    <none>   31m   v1.14.1
+```
+
+Another nice test is the following:
+```
+kubectl get po --all-namespaces -o wide
+```
+Which should yield something like the following:
+```
+NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
+kube-system   coredns-97c4b444f-9wm86                 1/1     Running   0          31m   10.233.66.2   kub-3    <none>           <none>
+kube-system   coredns-97c4b444f-g7hqx                 0/1     Pending   0          30m   <none>        <none>   <none>           <none>
+kube-system   dns-autoscaler-5fc5fdbf6-5c48k          1/1     Running   0          31m   10.233.66.3   kub-3    <none>           <none>
+kube-system   kube-apiserver-kub-1                    1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-apiserver-kub-2                    1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-controller-manager-kub-1           1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-controller-manager-kub-2           1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-flannel-8tgcn                      2/2     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   kube-flannel-b2hgt                      2/2     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-flannel-zx4bc                      2/2     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-proxy-4bjdn                        1/1     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-proxy-l5tt5                        1/1     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   kube-proxy-x59q8                        1/1     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-scheduler-kub-1                    1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-scheduler-kub-2                    1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kubernetes-dashboard-6c7466966c-jqz42   1/1     Running   0          31m   10.233.66.4   kub-3    <none>           <none>
+kube-system   nginx-proxy-kub-3                       1/1     Running   0          32m   10.0.20.103   kub-3    <none>           <none>
+kube-system   nodelocaldns-2x7vh                      1/1     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   nodelocaldns-fpvnz                      1/1     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   nodelocaldns-h2f42                      1/1     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+```
+
+Create clusteradmin rbac and get the login token for the dashboard:
+```
+kubectl create -f contrib/misc/clusteradmin-rbac.yml
+kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$'
+```
+Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login).

docs/vars.md
@@ -57,10 +57,16 @@ following default cluster parameters:
   10.233.0.0/18). Must not overlap with kube_pods_subnet
 * *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
   overlap with kube_service_addresses.
-* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remainin
+* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
   bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
 * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
+* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
+* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
+  on the CoreDNS service.
+* *coredns_k8s_external_zone* - Zone that will be used when the CoreDNS k8s_external plugin is enabled
+  (default is k8s_external.local)
+* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures the endpoint_pod_names option for the kubernetes plugin
+  on the CoreDNS service.
 * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
@@ -98,6 +104,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md)

 * *docker_options* - Commonly used to set
   ``--insecure-registry=myregistry.mydomain:5000``
+* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
 * *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
   proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
   that correspond to each node.
@@ -117,11 +124,13 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md)
   from the kube-apiserver when the certificate expiration approaches.
 * *node_labels* - Labels applied to nodes via the kubelet --node-labels parameter.
   For example, labels can be set in the inventory as variables or more widely in group_vars.
-  *node_labels* must be defined as a dict:
+  *node_labels* can be defined either as a dict or a comma-separated labels string:
 ```
 node_labels:
   label1_name: label1_value
   label2_name: label2_value

+node_labels: "label1_name=label1_value,label2_name=label2_value"
 ```
 * *node_taints* - Taints applied to nodes via the kubelet --register-with-taints parameter.
   For example, taints can be set in the inventory as variables or more widely in group_vars.
@@ -27,12 +27,6 @@
   - { role: kubespray-defaults}
   - { role: bootstrap-os, tags: bootstrap-os}

-- hosts: k8s-cluster:etcd:calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  vars:
-    ansible_ssh_pipelining: true
-  gather_facts: true
-
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:

@@ -12,3 +12,4 @@ node1
 [k8s-cluster:children]
 kube-node
 kube-master
+calico-rr
@@ -2,6 +2,9 @@
 ## Directory where etcd data stored
 etcd_data_dir: /var/lib/etcd

+## Experimental kubeadm etcd deployment mode. Available only for new deployments
+etcd_kubeadm_enabled: false
+
 ## Directory where the binaries will be installed
 bin_dir: /usr/local/bin
@@ -1,12 +1,12 @@
-# # When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
 # openstack_blockstorage_version: "v1/v2/auto (default)"
 # openstack_blockstorage_ignore_volume_az: yes
-# # When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
 # openstack_lbaas_enabled: True
 # openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-# # To enable automatic floating ip provisioning, specify a subnet.
+## To enable automatic floating ip provisioning, specify a subnet.
 # openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-# # Override default LBaaS behavior
+## Override default LBaaS behavior
 # openstack_lbaas_use_octavia: False
 # openstack_lbaas_method: "ROUND_ROBIN"
 # openstack_lbaas_provider: "haproxy"
@@ -35,6 +35,8 @@ local_volume_provisioner_enabled: false
 #   local-storage:
 #     host_dir: /mnt/disks
 #     mount_dir: /mnt/disks
+#     volume_mode: Filesystem
+#     fs_type: ext4
 #   fast-disks:
 #     host_dir: /mnt/fast-disks
 #     mount_dir: /mnt/fast-disks
@@ -78,8 +80,9 @@ rbd_provisioner_enabled: false
 # Nginx ingress controller deployment
 ingress_nginx_enabled: false
 # ingress_nginx_host_network: false
+ingress_publish_status_address: ""
 # ingress_nginx_nodeselector:
-#   beta.kubernetes.io/os: "linux": ""
+#   beta.kubernetes.io/os: "linux"
 # ingress_nginx_tolerations:
 #   - key: "node-role.kubernetes.io/master"
 #     operator: "Equal"
@@ -94,7 +97,7 @@ ingress_nginx_enabled: false
 # ingress_nginx_configmap_tcp_services:
 #   9000: "default/example-go:8080"
 # ingress_nginx_configmap_udp_services:
-#   53: "kube-system/kube-dns:53"
+#   53: "kube-system/coredns:53"

 # Cert manager deployment
 cert_manager_enabled: false
@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
|
||||
kube_api_anonymous_auth: true
|
||||
|
||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||
kube_version: v1.14.1
|
||||
kube_version: v1.15.11
|
||||
|
||||
# kubernetes image repo define
|
||||
kube_image_repo: "gcr.io/google-containers"
|
||||
@@ -134,6 +134,12 @@ dns_mode: coredns
|
||||
# Enable nodelocal dns cache
|
||||
enable_nodelocaldns: true
|
||||
nodelocaldns_ip: 169.254.25.10
|
||||
nodelocaldns_health_port: 9254
|
||||
# Enable k8s_external plugin for CoreDNS
|
||||
enable_coredns_k8s_external: false
|
||||
coredns_k8s_external_zone: k8s_external.local
|
||||
# Enable endpoint_pod_names option for kubernetes plugin
|
||||
enable_coredns_k8s_endpoint_pod_names: false
|
||||
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
resolvconf_mode: docker_dns
|
||||
@@ -145,7 +151,7 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
dns_domain: "{{ cluster_name }}"

## Container runtime
-## docker for docker and crio for cri-o.
+## docker for docker, crio for cri-o and containerd for containerd.
container_manager: docker

## Settings for containerized control plane (etcd/kubelet/secrets)
@@ -187,6 +193,18 @@ podsecuritypolicy_enabled: false
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

+## Optionally reserve resources for OS system daemons.
+# system_reserved: true
+## Uncomment to override default values
+# system_memory_reserved: 512M
+# system_cpu_reserved: 500m
+## Reservation for master hosts
+# system_master_memory_reserved: 256M
+# system_master_cpu_reserved: 250m
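
Uncommented, these reservations reduce the capacity a node advertises as allocatable; they map, roughly, to kubelet's --system-reserved flag (a sketch with illustrative values):

    system_reserved: true
    system_memory_reserved: 512M
    system_cpu_reserved: 500m
    # approximately: kubelet --system-reserved=cpu=500m,memory=512M
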

# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

@@ -0,0 +1,6 @@
---
# private interface, on a l2-network
macvlan_interface: "eth1"

# Enable nat in default gateway network interface
enable_nat_default_gateway: true
@@ -28,6 +28,9 @@
# node5
# node6

+[calico-rr]
+
[k8s-cluster:children]
kube-master
kube-node
+calico-rr
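
As a hedged example, a host becomes a Calico route reflector by being listed in the new group (the node name is illustrative):

    [calico-rr]
    node2
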
mitogen.yaml
@@ -3,29 +3,29 @@
  strategy: linear
  vars:
    mitogen_version: master
-    mitogen_url: https://github.com/dw/mitogen/archive/{{mitogen_version}}.zip
+    mitogen_url: https://github.com/dw/mitogen/archive/{{ mitogen_version }}.zip
  tasks:
    - name: Create mitogen plugin dir
      file:
-        path: "{{item}}"
+        path: "{{ item }}"
        state: directory
      become: false
      loop:
-        - "{{playbook_dir}}/plugins/mitogen"
-        - "{{playbook_dir}}/dist"
+        - "{{ playbook_dir }}/plugins/mitogen"
+        - "{{ playbook_dir }}/dist"

    - name: download mitogen release
      get_url:
-        url: "{{mitogen_url}}"
-        dest: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
+        url: "{{ mitogen_url }}"
+        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
        validate_certs: true

    - name: extract zip
      unarchive:
-        src: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
-        dest: "{{playbook_dir}}/dist/"
+        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
+        dest: "{{ playbook_dir }}/dist/"

    - name: copy plugin
      synchronize:
-        src: "{{playbook_dir}}/dist/mitogen-{{mitogen_version}}/"
-        dest: "{{playbook_dir}}/plugins/mitogen"
+        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
+        dest: "{{ playbook_dir }}/plugins/mitogen"
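
Downloading the plugin is only half the wiring; Ansible still has to be pointed at it. A sketch of the ansible.cfg entries, with the strategy path taken from upstream mitogen documentation (treat the exact path as an assumption):

    [defaults]
    strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
    strategy = mitogen_linear
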
@@ -1,6 +1,7 @@
---
- hosts: localhost
+  become: no
  gather_facts: no
  tasks:
    - name: "Check ansible version >=2.7.8"
      assert:
@@ -12,12 +13,8 @@
      vars:
        ansible_connection: local

-- hosts: all
-  vars:
-    ansible_ssh_pipelining: true
-  gather_facts: true

- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
  gather_facts: no
  vars_prompt:
    name: "delete_nodes_confirmation"
    prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -31,14 +28,20 @@
        when: delete_nodes_confirmation != "yes"

- hosts: kube-master
  gather_facts: no
  roles:
    - { role: kubespray-defaults }
    - { role: remove-node/pre-remove, tags: pre-remove }

- hosts: "{{ node | default('kube-node') }}"
  gather_facts: no
  roles:
    - { role: kubespray-defaults }
-    - { role: reset, tags: reset }
+    - { role: reset, tags: reset, when: reset_nodes|default(True) }

-- hosts: kube-master
+# Currently cannot remove first master or etcd
+- hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
  gather_facts: no
  roles:
    - { role: kubespray-defaults }
    - { role: remove-node/post-remove, tags: post-remove }
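
Taken together, these changes let the playbook skip the reset role on machines that are already unreachable. A hypothetical invocation (inventory path and node name are placeholders):

    ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -e node=node5 -e reset_nodes=false
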
@@ -1,7 +1,7 @@
-ansible>=2.7.8
-jinja2>=2.9.6
-netaddr
-pbr>=1.6
-hvac
-jmespath
-ruamel.yaml
+ansible==2.7.12
+jinja2==2.10.1
+netaddr==0.7.19
+pbr==5.2.0
+hvac==0.8.2
+jmespath==0.9.4
+ruamel.yaml==0.15.96
@@ -1,15 +1,15 @@
---
- name: User | Create User Group
  group:
-    name: "{{user.group|default(user.name)}}"
-    system: "{{user.system|default(omit)}}"
+    name: "{{ user.group|default(user.name) }}"
+    system: "{{ user.system|default(omit) }}"

- name: User | Create User
  user:
-    comment: "{{user.comment|default(omit)}}"
-    createhome: "{{user.createhome|default(omit)}}"
-    group: "{{user.group|default(user.name)}}"
-    home: "{{user.home|default(omit)}}"
-    shell: "{{user.shell|default(omit)}}"
-    name: "{{user.name}}"
-    system: "{{user.system|default(omit)}}"
+    comment: "{{ user.comment|default(omit) }}"
+    createhome: "{{ user.createhome|default(omit) }}"
+    group: "{{ user.group|default(user.name) }}"
+    home: "{{ user.home|default(omit) }}"
+    shell: "{{ user.shell|default(omit) }}"
+    name: "{{ user.name }}"
+    system: "{{ user.system|default(omit) }}"
@@ -1,11 +1,13 @@
---
-- set_fact:
+- name: set bastion host IP
+  set_fact:
    bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
  delegate_to: localhost

# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
-- set_fact:
+- name: Store the current ansible_user in the real_user fact
+  set_fact:
    real_user: "{{ ansible_user }}"

- name: create ssh bastion conf
@@ -15,4 +15,4 @@ Host {{ bastion_ip }}
  ControlPersist 5m

Host {{ vars['hosts'] }}
-  ProxyCommand ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
+  ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
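
The added -F /dev/null makes the inner ssh ignore any user or system ssh configuration, so the ProxyCommand cannot recursively pick up the generated bastion stanza it lives in. Rendered with invented placeholder addresses, the stanza might read:

    Host 192.0.2.10 192.0.2.11
      ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p admin@203.0.113.5
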
@@ -23,7 +23,6 @@ Variables are listed with their default values, if applicable.

* `http_proxy`/`https_proxy`
  The role will configure the package manager (if applicable) to download packages via a proxy.
  This is currently implemented for CentOS/RHEL (`http_proxy` only) as well as Debian and Ubuntu (both `http_proxy` and `https_proxy` are respected).

* `override_system_hostname: true`
  The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).

@@ -19,6 +19,9 @@
- name: Run bootstrap.sh
  script: bootstrap.sh
  become: true
+  environment:
+    http_proxy: "{{ http_proxy | default('') }}"
+    https_proxy: "{{ https_proxy | default('') }}"
  when:
    - need_bootstrap.rc != 0

@@ -12,8 +12,8 @@
  tags:
    - facts

-- name: Check http::proxy in /etc/apt/apt.conf
-  raw: grep -qsi 'Acquire::http::proxy' /etc/apt/apt.conf
+- name: Check http::proxy in apt configuration files
+  raw: apt-config dump | grep -qsi 'Acquire::http::proxy'
  register: need_http_proxy
  failed_when: false
  changed_when: false
@@ -31,8 +31,8 @@
    - http_proxy is defined
    - need_http_proxy.rc != 0

-- name: Check https::proxy in /etc/apt/apt.conf
-  raw: grep -qsi 'Acquire::https::proxy' /etc/apt/apt.conf
+- name: Check https::proxy in apt configuration files
+  raw: apt-config dump | grep -qsi 'Acquire::https::proxy'
  register: need_https_proxy
  failed_when: false
  changed_when: false

@@ -25,6 +25,26 @@
  tags:
    - facts

+- name: Check if a proxy is set in /etc/dnf/dnf.conf
+  raw: grep -qs 'proxy=' /etc/dnf/dnf.conf
+  register: need_http_proxy
+  failed_when: false
+  changed_when: false
+  # This command should always run, even in check mode
+  check_mode: false
+  environment: {}
+  when:
+    - http_proxy is defined
+
+- name: Add http_proxy to /etc/dnf/dnf.conf if http_proxy is defined
+  raw: echo 'proxy={{ http_proxy }}' >> /etc/dnf/dnf.conf
+  become: true
+  environment: {}
+  when:
+    - http_proxy is defined
+    - need_http_proxy.rc != 0
+    - not is_atomic
+
# Fedora's policy as of Fedora 30 is to still install python2 as /usr/bin/python
# See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3 for the current status
- name: Install python on fedora

@@ -1,6 +1,33 @@
---
# OpenSUSE ships with Python installed

+- name: Set the http_proxy in /etc/sysconfig/proxy
+  lineinfile:
+    path: /etc/sysconfig/proxy
+    regexp: '^HTTP_PROXY='
+    line: 'HTTP_PROXY="{{ http_proxy }}"'
+  become: true
+  when:
+    - http_proxy is defined
+
+- name: Set the https_proxy in /etc/sysconfig/proxy
+  lineinfile:
+    path: /etc/sysconfig/proxy
+    regexp: '^HTTPS_PROXY='
+    line: 'HTTPS_PROXY="{{ https_proxy }}"'
+  become: true
+  when:
+    - https_proxy is defined
+
+- name: Enable proxies
+  lineinfile:
+    path: /etc/sysconfig/proxy
+    regexp: '^PROXY_ENABLED='
+    line: 'PROXY_ENABLED="yes"'
+  become: true
+  when:
+    - http_proxy is defined or https_proxy is defined
+
# Without this package, the get_url module fails when trying to handle https
- name: Install python-cryptography
  zypper:

roles/bootstrap-os/tasks/bootstrap-oracle.yml (new file)
@@ -0,0 +1,21 @@
---
- name: Download Oracle Linux public yum repo
  get_url:
    url: https://yum.oracle.com/public-yum-ol7.repo
    dest: /etc/yum.repos.d/public-yum-ol7.repo

- name: Enable Oracle Linux repo
  ini_file:
    dest: /etc/yum.repos.d/public-yum-ol7.repo
    section: "{{ item }}"
    option: enabled
    value: "1"
  with_items:
    - ol7_latest
    - ol7_addons
    - ol7_developer_EPEL

- name: Install packages requirements for bootstrap
  yum:
    name: container-selinux
    state: present
@@ -25,6 +25,9 @@
- include_tasks: bootstrap-opensuse.yml
  when: '"openSUSE" in os_release.stdout'

+- include_tasks: bootstrap-oracle.yml
+  when: '"Oracle" in os_release.stdout'
+
- name: Create remote_tmp for it is used by another module
  file:
    path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}"
@@ -69,3 +72,11 @@
      - ceph-common
    state: present
  when: rbd_provisioner_enabled|default(false)
+
+- name: Ensure bash_completion.d folder exists
+  file:
+    name: /etc/bash_completion.d/
+    state: directory
+    owner: root
+    group: root
+    mode: 0755

roles/container-engine/containerd/defaults/main.yml (new file)
@@ -0,0 +1,44 @@
---
kubelet_cgroup_driver: systemd

containerd_config:
  grpc:
    max_recv_message_size: 16777216
    max_send_message_size: 16777216
  debug:
    level: ""
  registries:
    "docker.io": "https://registry-1.docker.io"
  max_container_log_line_size: -1

containerd_version: '1.2.6'
containerd_package: 'containerd.io'

containerd_cfg_dir: /etc/containerd

# Path to runc binary
runc_binary: /usr/sbin/runc


yum_repo_dir: /etc/yum.repos.d
yum_conf: /etc/yum.conf
containerd_yum_conf: /etc/yum_containerd.conf

# Optional values for containerd apt repo
containerd_package_info:
  pkgs:

containerd_repo_key_info:
  repo_keys:

containerd_repo_info:
  repos:

extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"

# Ubuntu docker-ce repo
containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
containerd_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
containerd_ubuntu_repo_repokey: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
containerd_ubuntu_repo_component: 'stable'
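
Because containerd_config is a plain dictionary, registry mirrors can be redirected from group_vars; note that with Ansible's default variable merging an override replaces the whole dict, so unchanged keys must be repeated. A hedged sketch pointing docker.io at a hypothetical internal mirror:

    containerd_config:
      grpc:
        max_recv_message_size: 16777216
        max_send_message_size: 16777216
      debug:
        level: ""
      registries:
        "docker.io": "https://registry-mirror.internal.example"
      max_container_log_line_size: -1
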
roles/container-engine/containerd/handlers/main.yml (new file)
@@ -0,0 +1,20 @@
---
- name: restart containerd
  command: /bin/true
  notify:
    - Containerd | restart containerd
    - Containerd | wait for containerd

- name: Containerd | restart containerd
  systemd:
    name: containerd
    state: restarted
    enabled: yes
    daemon-reload: yes

- name: Containerd | wait for containerd
  command: "{{ containerd_bin_dir }}/ctr images ls -q"
  register: containerd_ready
  retries: 8
  delay: 4
  until: containerd_ready.rc == 0
roles/container-engine/containerd/tasks/containerd_repo.yml (new file)
@@ -0,0 +1,85 @@
---
- name: ensure containerd repository public key is installed
  action: "{{ containerd_repo_key_info.pkg_key }}"
  args:
    id: "{{ item }}"
    url: "{{ containerd_repo_key_info.url }}"
    state: present
  register: keyserver_task_result
  until: keyserver_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  with_items: "{{ containerd_repo_key_info.repo_keys }}"
  when:
    - ansible_os_family in ['Ubuntu', 'Debian']
    - not is_atomic

- name: ensure containerd repository is enabled
  action: "{{ containerd_repo_info.pkg_repo }}"
  args:
    repo: "{{ item }}"
    state: present
  with_items: "{{ containerd_repo_info.repos }}"
  when:
    - ansible_os_family in ['Ubuntu', 'Debian']
    - not is_atomic
    - containerd_repo_info.repos|length > 0

# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
  template:
    src: "apt_preferences.d/debian_containerd.j2"
    dest: "/etc/apt/preferences.d/containerd"
    owner: "root"
    mode: 0644
  when:
    - ansible_os_family in ['Ubuntu', 'Debian']
    - not is_atomic

- name: Configure containerd repository on Fedora
  template:
    src: "fedora_containerd.repo.j2"
    dest: "{{ yum_repo_dir }}/containerd.repo"
  when: ansible_distribution == "Fedora" and not is_atomic

- name: Configure containerd repository on RedHat/CentOS
  template:
    src: "rh_containerd.repo.j2"
    dest: "{{ yum_repo_dir }}/containerd.repo"
  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

- name: check if container-selinux is available
  yum:
    list: "container-selinux"
  register: yum_result
  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
  yum_repository:
    name: extras
    description: "CentOS-7 - Extras"
    state: present
    baseurl: "{{ extras_rh_repo_base_url }}"
    file: "extras"
    gpgcheck: yes
    gpgkey: "{{ extras_rh_repo_gpgkey }}"
    keepcache: "{{ containerd_rpm_keepcache | default('1') }}"
    proxy: " {{ http_proxy | default('_none_') }}"
  when:
    - ansible_distribution in ["CentOS","RedHat"] and not is_atomic
    - yum_result.results | length == 0

- name: Copy yum.conf for editing
  copy:
    src: "{{ yum_conf }}"
    dest: "{{ containerd_yum_conf }}"
    remote_src: yes
  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

- name: Edit copy of yum.conf to set obsoletes=0
  lineinfile:
    path: "{{ containerd_yum_conf }}"
    state: present
    regexp: '^obsoletes='
    line: 'obsoletes=0'
  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
roles/container-engine/containerd/tasks/crictl.yml (new file)
@@ -0,0 +1,27 @@
---
- name: crictl | Download crictl
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
  template:
    src: ../templates/crictl.yaml.j2
    dest: /etc/crictl.yaml
    owner: bin
    mode: 0644

- name: Copy crictl binary from download dir
  synchronize:
    src: "{{ local_release_dir }}/crictl"
    dest: "{{ bin_dir }}/crictl"
    compress: no
    perms: yes
    owner: no
    group: no
  delegate_to: "{{ inventory_hostname }}"

- name: Install crictl completion
  shell: "{{ bin_dir }}/crictl completion >/etc/bash_completion.d/crictl"
  ignore_errors: True
  when: ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]
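
Once installed, crictl reads /etc/crictl.yaml and talks to containerd over the configured socket; standard subcommands make a quick smoke test:

    crictl info
    crictl images
    crictl ps -a
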
roles/container-engine/containerd/tasks/main.yml (new file)
@@ -0,0 +1,95 @@
---
- name: Fail containerd setup if distribution is not supported
  fail:
    msg: "{{ ansible_distribution }} is not supported by containerd."
  when:
    - not ansible_distribution in ["CentOS","RedHat", "Ubuntu", "Debian"]

- name: gather os specific variables
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars
      skip: true
  tags:
    - facts

- include_tasks: containerd_repo.yml

- name: ensure containerd config directory
  file:
    dest: "{{ containerd_cfg_dir }}"
    state: directory
    mode: 0755
    owner: root
    group: root

- name: Copy containerd config file
  template:
    src: config.toml.j2
    dest: "{{ containerd_cfg_dir }}/config.toml"
    owner: "root"
    mode: 0644
  notify: restart containerd

# This is required to ensure any apt upgrade will not break kubernetes
- name: Set containerd pin priority to apt_preferences on Debian family
  template:
    src: "apt_preferences.d/debian_containerd.j2"
    dest: "/etc/apt/preferences.d/containerd"
    owner: "root"
    mode: 0644
  when:
    - ansible_os_family in ['Ubuntu', 'Debian']
    - not is_atomic

- name: ensure containerd packages are installed
  action: "{{ containerd_package_info.pkg_mgr }}"
  args:
    pkg: "{{ item.name }}"
    force: "{{ item.force | default(omit) }}"
    conf_file: "{{ item.yum_conf | default(omit) }}"
    state: present
    update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
  register: containerd_task_result
  until: containerd_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  with_items: "{{ containerd_package_info.pkgs }}"
  notify: restart containerd
  when:
    - not is_atomic
    - containerd_package_info.pkgs|length > 0
  ignore_errors: true

- name: Check if runc is installed
  stat:
    path: "{{ runc_binary }}"
  register: runc_stat

- name: Install runc package if necessary
  action: "{{ containerd_package_info.pkg_mgr }}"
  args:
    pkg: runc
    state: present
    update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
  register: runc_task_result
  until: runc_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  notify: restart containerd
  when:
    - not is_atomic
    - not runc_stat.stat.exists

- include_tasks: crictl.yml
@@ -0,0 +1,3 @@
Package: {{ containerd_package }}
Pin: version {{ containerd_version }}*
Pin-Priority: 1001
roles/container-engine/containerd/templates/config.toml.j2 (new file)
@@ -0,0 +1,40 @@
# Kubernetes doesn't use containerd restart manager.
disabled_plugins = ["restart"]

[debug]
  level = "{{ containerd_config.debug.level | default("") }}"

{% if 'grpc' in containerd_config %}
[grpc]
{% for param, value in containerd_config.grpc.items() %}
  {{ param }} = {{ value }}
{% endfor %}
{% endif %}

[plugins.linux]
  shim = "/usr/bin/containerd-shim"
  runtime = "{{ runc_binary }}"

[plugins.cri]
  stream_server_address = "127.0.0.1"
  max_container_log_line_size = {{ containerd_config.max_container_log_line_size }}
  sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"

[plugins.cri.cni]
  bin_dir = "/opt/cni/bin"
  conf_dir = "/etc/cni/net.d"
  conf_template = ""

[plugins.cri.containerd.untrusted_workload_runtime]
  runtime_type = ""
  runtime_engine = ""
  runtime_root = ""

{% if 'registries' in containerd_config %}
[plugins.cri.registry]
  [plugins.cri.registry.mirrors]
{% for registry, addr in containerd_config.registries.items() %}
    [plugins.cri.registry.mirrors."{{ registry }}"]
      endpoint = ["{{ addr }}"]
{% endfor %}
{% endif %}
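
With the role defaults above ("docker.io" mapped to registry-1.docker.io), the registry loop should render roughly as follows (a sketch of the expected output, not captured from a live host):

    [plugins.cri.registry]
      [plugins.cri.registry.mirrors]
        [plugins.cri.registry.mirrors."docker.io"]
          endpoint = ["https://registry-1.docker.io"]
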
@@ -0,0 +1,4 @@
runtime-endpoint: unix://{{ cri_socket }}
image-endpoint: unix://{{ cri_socket }}
timeout: 30
debug: false
@@ -0,0 +1,9 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}
@@ -0,0 +1,9 @@
[docker-ce]
name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
keepcache={{ docker_rpm_keepcache | default('1') }}
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
{% if ansible_os_family == "RedHat" and ansible_distribution_major_version|int == 8 %}module_hotfixes=True{% endif %}
roles/container-engine/containerd/vars/redhat.yml (new file)
@@ -0,0 +1,28 @@
---

containerd_versioned_pkg:
  'latest': "{{ containerd_package }}"
  '1.2.4': "{{ containerd_package }}-1.2.4-3.1.el7"
  '1.2.5': "{{ containerd_package }}-1.2.5-3.1.el7"
  '1.2.6': "{{ containerd_package }}-1.2.6-3.3.el7"
  'stable': "{{ containerd_package }}-1.2.6-3.3.el7"
  'edge': "{{ containerd_package }}-1.2.6-3.3.el7"

containerd_package_info:
  pkg_mgr: yum
  pkgs:
    - name: "{{ containerd_versioned_pkg[containerd_version | string] }}"

containerd_pkgs:
  - name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
    yum_conf: "{{ containerd_yum_conf }}"

containerd_repo_key_info:
  pkg_key: ''
  repo_keys: []

containerd_repo_info:
  pkg_repo: ''
  repos: []

runc_binary: /bin/runc
roles/container-engine/containerd/vars/suse.yml (new file)
@@ -0,0 +1,17 @@
---
# docker-ce containerd.io does not contain daemon
containerd_package: containerd

containerd_package_info:
  pkg_mgr: zypper
  pkgs:
    - name: "{{ containerd_package }}"
      state: latest

containerd_repo_key_info:
  pkg_key: ''
  repo_keys: []

containerd_repo_info:
  pkg_repo: ''
  repos: []
roles/container-engine/containerd/vars/ubuntu-amd64.yml (new file)
@@ -0,0 +1,31 @@
---

containerd_versioned_pkg:
  'latest': "{{ containerd_package }}"
  '1.2.4': "{{ containerd_package }}=1.2.4-1"
  '1.2.5': "{{ containerd_package }}=1.2.5-1"
  '1.2.6': "{{ containerd_package }}=1.2.6-3"
  'stable': "{{ containerd_package }}=1.2.4-1"
  'edge': "{{ containerd_package }}=1.2.4-1"

containerd_package_info:
  pkg_mgr: apt
  pkgs:
    - name: "{{ containerd_versioned_pkg[containerd_version | string] }}"
      force: false

containerd_repo_key_info:
  pkg_key: apt_key
  url: '{{ containerd_ubuntu_repo_gpgkey }}'
  repo_keys:
    - '{{ containerd_ubuntu_repo_repokey }}'

containerd_repo_info:
  pkg_repo: apt_repository
  repos:
    - >
      deb {{ containerd_ubuntu_repo_base_url }}
      {{ ansible_distribution_release|lower }}
      {{ containerd_ubuntu_repo_component }}

runc_binary: /usr/bin/runc
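
These maps are keyed by containerd_version, so pinning a particular build is a one-line group_vars override (hedged example); with the tables above, '1.2.5' would resolve to containerd.io=1.2.5-1 on Ubuntu amd64 and containerd.io-1.2.5-3.1.el7 on RedHat/CentOS:

    containerd_version: '1.2.5'
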
@@ -1,2 +1,2 @@
---
-crio_rhel_repo_base_url: 'https://cbs.centos.org/repos/paas7-openshift-origin311-candidate/x86_64/os/'
+crio_rhel_repo_base_url: 'http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin311/'

@@ -22,7 +22,13 @@
    description: OpenShift Origin Repo
    baseurl: "{{ crio_rhel_repo_base_url }}"
    gpgcheck: no
-  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+  when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_atomic
+
+- name: Add CRI-O PPA
+  apt_repository:
+    repo: ppa:projectatomic/ppa
+    state: present
+  when: ansible_distribution in ["Ubuntu"]

- name: Make sure needed folders exist in the system
  with_items:

Some files were not shown because too many files have changed in this diff.