Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: 272 commits, v2.15.0...release-2.15
The commit table lost its author, message, and date columns during extraction; only the 272 commit SHA1s survive, in page order:

```text
c91a05f330 a583a2d9aa 713abf29ca 247d062c02 9fa051780e bcf695913f
23cd1d41fb 62f5369237 59fc17f4e3 26c1d42fff c1aa755a3c b3d9f2b4a2
29c2fbdbc1 4b9f98f933 e9870b8d25 e0c74fa082 5b93a97281 bdf74c6749
d6f9a8d752 e357d8678c b1b407a0b4 6c3d1649a6 14cf3e138b afbabebfd5
8c0a2741ae 1d078e1119 d90baa8601 d5660cd37c 63cec45597 f07e24db8f
5d5be3e96a 6e7649360f 1dd38721b3 6a001e4971 96e6a6ac3f 2556eb2733
d29ea386d6 a0ee569091 3f4eb9be08 5ea2d1eb67 ffc38a2237 360aff4a57
d26191373a 4c06aa98b5 1b267b6599 dd6efb73f7 dfeed1c1a4 0071e3c99c
0feec14b15 975f84494c 7c86734d2e 8665e1de87 c16efc9ab8 324c95d37f
69806e0a46 ad15a4b755 002a4b03a4 96476430a3 73db44b00c b32d25942d
fce705a92b 6164c90f70 e036b899a3 8c7b90ebbf 38d9d2ea0e 384d30b675
add61868c6 b599f3084f a7493e26e1 ae3a1d7c01 e39e3d5c26 1e7d48846a
6001edeecd ce0b7834ff 3ac92689f0 1c0836946f bccbe323b7 d73249a793
cd9a03f86c b47c21c683 6de5303e3f 2a2fb68b2f 844ebb7838 332cc1cd58
e7ce83016e bf6a39eb84 42382e2cde f8e4650791 e444b3c140 d56ac216f4
420a412234 90c643f3ab 1d4e380231 6d293ba899 aa086e5407 cce0940e1f
daed3e5b6a e2a7f3e2ab 5a351b4b00 6f2abbf79c bef1e628ac 7340a163a4
a6622b176b 771a5e26bb be278f9dba 6479e26904 1c7053c9d8 596d0289f8
7df7054bdc 5377aac936 ceb6c172ad 7f52c1d3a2 af1e16b934 2257181ca8
7e75d48cc4 6330db89a7 f05d6b3711 cce9d3125d e381ce57e2 5dbce6a2bd
5b0e88339a db43891f2b f72063e7c2 36a3a78952 2d1597bf10 edfa3e9b14
6fa3565dac 7dec8e5caa 49abf6007a f0cdf71ccb 8655b92e93 e1c6992c55
486b223e01 d53fd29e34 4f89bfac48 5fee96b404 12873f916b efa180392b
6d9ed398e3 6d3dbb43a4 811f546ea6 ead8a4e4de 05f132c136 5f2c8ac38f
14511053aa 8353532a09 1c62af0c95 f103ac7640 274e06a48d a39f306184
69d11daef6 057e8b4358 18c0e54e4f 85007fa9a7 5c5bf41afe 5dba53a223
2bcd9eb9e9 5a54db2f3c b47542b003 14b63ede8c b07c5966a6 c7db72e1da
dc5df57c26 a9c97e5253 53e5ef6b4e 8800b5c01d 280036fad6 a6e1f5ece9
fedd671d68 b7c22659e3 c9c0c01de0 e442b1d2b9 e9f4ff227e 668bbe0528
e045a45e48 2c9fc18903 d4eecac108 ef351e0234 05adeed1fa 15f1b19136
154fa45422 e35becebf8 bdd36c2d34 0a0156c946 100d9333ca a4cc416511
2ea5793782 0ddf915027 067db686f6 ed2b4b805e 8375aa72e2 6334e4bd84
86ce8aac85 de46f86137 5616b08229 8682a57ea3 662a37ab4f 42947c9840
3749729d5a fb8b075110 1c5391dda7 f2d10e9465 796d3fb975 5c04bdd52b
17143dbc51 1c8bba36db 95b329b64d de1d9df787 6450207713 edc4bb4a49
a21ee33180 bcaa31ae33 0cc1726781 aad78840a0 e3ab665e90 1a91792e7c
670c37b428 040dacd5cd 59541de437 fc8551bcba c2c97c36bc 211fdde742
366cbb3e6f a318624fad 3cf5981146 4cc065e66d ba731ed145 b77460ec34
88bee6c68e 1f84d6344b 699fbd64ab b42bf39fb7 5368d51d63 c5db012c9a
b70d986bfa 973628fc1b 91fea7c956 d378d789cf 9007d6621a 774ec49396
bba55faae8 8f2b0772f9 1a409dc7ae 404ea0270e ef939dee74 f1576eabb1
49c4345c9a f94182f77d 222a77dfe7 24ceee134e 04c8a73889 9a75501152
f6fbbc17a4 15dc3868c3 2525d7aff8 a5d2137ed9 a8e51e686e a2429ef64d
1b88678cf3 0e96852159 19a61d838f 4eec302e86 f3885aa589 b493c81ce8
9ef62194c3 91ee4aa542 e3caff833c b2995e4ec4 ccd3aeebbc 7a033a1d55
1652d8bf4b c85f275bdb
```
```diff
@@ -8,7 +8,7 @@ stages:
   - deploy-special
 
 variables:
-  KUBESPRAY_VERSION: v2.14.1
+  KUBESPRAY_VERSION: v2.15.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
```
```diff
@@ -30,7 +30,9 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+  TERRAFORM_14_VERSION: 0.14.10
+  TERRAFORM_13_VERSION: 0.13.6
 
 before_script:
   - ./tests/scripts/rebase.sh
```
```diff
@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.10
+    VAGRANT_VERSION: 2.2.15
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
```
```diff
@@ -42,12 +42,10 @@ packet_centos7-flannel-containerd-addons-ha:
   variables:
     MITOGEN_ENABLE: "true"
 
-packet_centos7-crio:
+packet_centos8-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: on_success
-  variables:
-    MITOGEN_ENABLE: "true"
 
 packet_ubuntu18-crio:
   extends: .packet_pr
```
```diff
@@ -225,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
 
 packet_ubuntu18-calico-ha-recover-noquorum:
   stage: deploy-part3
```
```diff
@@ -233,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
```
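The `etcd[2:]` and `kube_control_plane[1:]` values in these recovery jobs are plain Ansible host-pattern slices (every host of the group except the first two, or the first one). A quick way to preview what such a pattern selects, with the inventory path as a placeholder:

```ShellSession
ansible 'kube_control_plane[1:]' -i inventory/mycluster/inventory.ini --list-hosts
```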
```diff
@@ -53,73 +53,92 @@
   # Cleanup regardless of exit code
   - chronic ./tests/scripts/testcases_cleanup.sh
 
-tf-validate-openstack:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: 0.12.29
-    PROVIDER: openstack
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-validate-packet:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: 0.12.29
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-validate-aws:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: 0.12.29
-    PROVIDER: aws
-    CLUSTER: $CI_COMMIT_REF_NAME
-
 tf-0.13.x-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.13.5
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-0.13.x-validate-packet:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.13.5
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: packet
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-0.13.x-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.13.5
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.13.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: exoscale
+
+tf-0.13.x-validate-vsphere:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: vsphere
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.13.x-validate-upcloud:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: upcloud
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 tf-0.14.x-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.14.3
+    TF_VERSION: $TERRAFORM_14_VERSION
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-0.14.x-validate-packet:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.14.3
+    TF_VERSION: $TERRAFORM_14_VERSION
     PROVIDER: packet
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-0.14.x-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.14.3
+    TF_VERSION: $TERRAFORM_14_VERSION
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.14.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: exoscale
+
+tf-0.14.x-validate-vsphere:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: vsphere
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-upcloud:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: upcloud
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.12.29
+#     TF_VERSION: $TERRAFORM_14_VERSION
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"
```
```diff
@@ -133,7 +152,7 @@ tf-0.14.x-validate-aws:
 # tf-packet-ubuntu18-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.12.29
+#     TF_VERSION: $TERRAFORM_14_VERSION
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"
```
```diff
@@ -188,9 +207,10 @@ tf-elastx_ubuntu18-calico:
   extends: .terraform_apply
   stage: deploy-part3
   when: on_success
+  allow_failure: true
   variables:
     <<: *elastx_variables
-    TF_VERSION: 0.12.29
+    TF_VERSION: $TERRAFORM_14_VERSION
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"
```
```diff
@@ -216,44 +236,45 @@ tf-elastx_ubuntu18-calico:
     TF_VAR_image: ubuntu-18.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
 
+# OVH voucher expired, commenting job until things are sorted out
 
-tf-ovh_cleanup:
-  stage: unit-tests
-  tags: [light]
-  image: python
-  environment: ovh
-  variables:
-    <<: *ovh_variables
-  before_script:
-    - pip install -r scripts/openstack-cleanup/requirements.txt
-  script:
-    - ./scripts/openstack-cleanup/main.py
+# tf-ovh_cleanup:
+#   stage: unit-tests
+#   tags: [light]
+#   image: python
+#   environment: ovh
+#   variables:
+#     <<: *ovh_variables
+#   before_script:
+#     - pip install -r scripts/openstack-cleanup/requirements.txt
+#   script:
+#     - ./scripts/openstack-cleanup/main.py
 
-tf-ovh_ubuntu18-calico:
-  extends: .terraform_apply
-  when: on_success
-  environment: ovh
-  variables:
-    <<: *ovh_variables
-    TF_VERSION: 0.12.29
-    PROVIDER: openstack
-    CLUSTER: $CI_COMMIT_REF_NAME
-    ANSIBLE_TIMEOUT: "60"
-    SSH_USER: ubuntu
-    TF_VAR_number_of_k8s_masters: "0"
-    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
-    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
-    TF_VAR_number_of_etcd: "0"
-    TF_VAR_number_of_k8s_nodes: "0"
-    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
-    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
-    TF_VAR_number_of_bastions: "0"
-    TF_VAR_number_of_k8s_masters_no_etcd: "0"
-    TF_VAR_use_neutron: "0"
-    TF_VAR_floatingip_pool: "Ext-Net"
-    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
-    TF_VAR_network_name: "Ext-Net"
-    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-    TF_VAR_image: "Ubuntu 18.04"
-    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
+# tf-ovh_ubuntu18-calico:
+#   extends: .terraform_apply
+#   when: on_success
+#   environment: ovh
+#   variables:
+#     <<: *ovh_variables
+#     TF_VERSION: $TERRAFORM_14_VERSION
+#     PROVIDER: openstack
+#     CLUSTER: $CI_COMMIT_REF_NAME
+#     ANSIBLE_TIMEOUT: "60"
+#     SSH_USER: ubuntu
+#     TF_VAR_number_of_k8s_masters: "0"
+#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
+#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
+#     TF_VAR_number_of_etcd: "0"
+#     TF_VAR_number_of_k8s_nodes: "0"
+#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
+#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
+#     TF_VAR_number_of_bastions: "0"
+#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
+#     TF_VAR_use_neutron: "0"
+#     TF_VAR_floatingip_pool: "Ext-Net"
+#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
+#     TF_VAR_network_name: "Ext-Net"
+#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+#     TF_VAR_image: "Ubuntu 18.04"
+#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
 
```
```diff
@@ -38,6 +38,11 @@ molecule_tests:
   after_script:
     - chronic ./tests/scripts/testcases_cleanup.sh
 
+vagrant_ubuntu18-calico-dual-stack:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
 vagrant_ubuntu18-flannel:
   stage: deploy-part2
   extends: .vagrant
```
Dockerfile (33 lines changed)
```diff
@@ -1,25 +1,30 @@
 # Use imutable image tags rather than mutable tags (like ubuntu:18.04)
 FROM ubuntu:bionic-20200807
 
-ENV KUBE_VERSION=v1.19.7
-
 RUN mkdir /kubespray
-WORKDIR /kubespray
-RUN apt update -y && \
-    apt install -y \
+RUN apt update -y \
+    && apt install -y \
     libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
-    add-apt-repository \
+    ca-certificates curl gnupg2 software-properties-common python3-pip rsync \
+    && rm -rf /var/lib/apt/lists/*
+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
+    && add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable" \
-    && apt update -y && apt-get install docker-ce -y
-COPY . .
-RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
+    && rm -rf /var/lib/apt/lists/*
 
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
-    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
+WORKDIR /kubespray
+COPY . .
+RUN /usr/bin/python3 -m pip install pip -U \
+    && /usr/bin/python3 -m pip install -r tests/requirements.txt \
+    && python3 -m pip install -r requirements.txt \
+    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+
+RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
+    && chmod a+x kubectl \
+    && mv kubectl /usr/local/bin/kubectl
 
 # Some tools like yamllint need this
 ENV LANG=C.UTF-8
```
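Because the image now derives its kubectl from `kube_version` in roles/kubespray-defaults at build time, rebuilding after switching branches keeps the two in sync. A minimal sketch (the tag name is arbitrary):

```ShellSession
docker build --tag kubespray:local .
docker run --rm -it kubespray:local bash
```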
README.md (58 lines changed)
```diff
@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
-cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
+cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml
 
 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,
```
````diff
@@ -48,11 +48,23 @@ As a consequence, `ansible-playbook` command will fail with:
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```
 
-probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
+probably pointing on a task depending on a module present in requirements.txt.
 
 One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
 A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
 
+A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
+You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
+
+```ShellSession
+docker pull quay.io/kubespray/kubespray:v2.15.1
+docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
+  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
+  quay.io/kubespray/kubespray:v2.15.1 bash
+# Inside the container you may now run the kubespray playbooks:
+ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
+```
+
 ### Vagrant
 
 For Vagrant we need to install python dependencies for provisioning tasks.
````
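The `ANSIBLE_LIBRARY`/`ANSIBLE_MODULE_UTILS` workaround described in the hunk above can be scripted; a minimal sketch, assuming the pip package is named `ansible` and that `pip show` reports the usual `Location:` field:

```ShellSession
# Find where pip installed the package (the "Location:" field).
ANSIBLE_LOCATION=$(pip show ansible | sed -n 's/^Location: //p')
export ANSIBLE_LIBRARY="${ANSIBLE_LOCATION}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ANSIBLE_LOCATION}/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```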
```diff
@@ -105,49 +117,55 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Buster, Jessie, Stretch, Wheezy
 - **Ubuntu** 16.04, 18.04, 20.04
-- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
+- **CentOS/RHEL** 7, [8](docs/centos8.md)
 - **Fedora** 32, 33
 - **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
-- **openSUSE** Leap 42.3/Tumbleweed
-- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)
+- **openSUSE** Leap 15.x/Tumbleweed
+- **Oracle Linux** 7, [8](docs/centos8.md)
+- **Alma Linux** [8](docs/centos8.md)
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)
 
 Note: Upstart/SysV init based OS types are not supported.
 
 ## Supported Components
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.19.7
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.20.7
   - [etcd](https://github.com/coreos/etcd) v3.4.13
   - [docker](https://www.docker.com/) v19.03 (see note)
-  - [containerd](https://containerd.io/) v1.3.9
-  - [cri-o](http://cri-o.io/) v1.19 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) v1.4.4
+  - [cri-o](http://cri-o.io/) v1.20 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.0
-  - [calico](https://github.com/projectcalico/calico) v3.16.5
+  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
+  - [calico](https://github.com/projectcalico/calico) v3.17.4
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.8.6
+  - [cilium](https://github.com/cilium/cilium) v1.8.9
   - [flanneld](https://github.com/coreos/flannel) v0.13.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.5.2
-  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.1.1
-  - [multus](https://github.com/intel/multus-cni) v3.6.0
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.2
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.2.2
+  - [multus](https://github.com/intel/multus-cni) v3.7.0
   - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
-  - [weave](https://github.com/weaveworks/weave) v2.7.0
+  - [weave](https://github.com/weaveworks/weave) v2.8.1
 - Application
   - [ambassador](https://github.com/datawire/ambassador): v1.5
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
   - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
   - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
   - [coredns](https://github.com/coredns/coredns) v1.7.0
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.41.2
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.43.0
 
-Note: The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+## Container Runtime Notes
+
+- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
 
 ## Requirements
 
-- **Minimum required version of Kubernetes is v1.18**
-- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is not supported for now**
+- **Minimum required version of Kubernetes is v1.19**
+- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
 - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
   in order to avoid any issue during deployment you should disable your firewall.
 - If kubespray is ran from non-root user account, correct privilege escalation method
```
```diff
@@ -203,6 +221,8 @@ See also [Network checker](docs/netcheck.md).
 
 - [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
 
+- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
+
 ## Community docs and resources
 
 - [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
```
Vagrantfile (vendored, 28 lines changed)
```diff
@@ -28,7 +28,7 @@ SUPPORTED_OS = {
   "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
   "fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
   "fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
-  "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
+  "opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
   "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
```
```diff
@@ -49,6 +49,7 @@ $vm_cpus ||= 2
 $shared_folders ||= {}
 $forwarded_ports ||= {}
 $subnet ||= "172.18.8"
+$subnet_ipv6 ||= "fd3c:b398:0698:0756"
 $os ||= "ubuntu1804"
 $network_plugin ||= "flannel"
 # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
```
```diff
@@ -85,9 +86,9 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
 if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
   $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
   FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
-  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
-    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
-  end
+  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
+  FileUtils.rm_f($vagrant_inventory)
+  FileUtils.ln_s($inventory, $vagrant_inventory)
 end
 
 if Vagrant.has_plugin?("vagrant-proxyconf")
```
```diff
@@ -194,11 +195,22 @@ Vagrant.configure("2") do |config|
       end
 
       ip = "#{$subnet}.#{i+100}"
-      node.vm.network :private_network, ip: ip
+      node.vm.network :private_network, ip: ip,
+        :libvirt__guest_ipv6 => 'yes',
+        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
+        :libvirt__ipv6_prefix => "64",
+        :libvirt__forward_mode => "none",
+        :libvirt__dhcp_enabled => false
 
       # Disable swap for each vm
       node.vm.provision "shell", inline: "swapoff -a"
 
+      # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
+      if ["ubuntu1804", "ubuntu2004"].include? $os
+        node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
+        node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
+      end
+
       # Disable firewalld on oraclelinux/redhat vms
       if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
         node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
```
```diff
@@ -241,9 +253,9 @@ Vagrant.configure("2") do |config|
         #ansible.tags = ['download']
         ansible.groups = {
           "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
-          "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
-          "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-          "k8s-cluster:children" => ["kube-master", "kube-node"],
+          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+          "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+          "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
         }
       end
     end
```
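The `$var ||=` defaults above only take effect when nothing has set the variable first; Kubespray's Vagrantfile reads user overrides from `vagrant/config.rb`. A sketch of a local override (the values are placeholders):

```ShellSession
mkdir -p vagrant
cat > vagrant/config.rb << 'EOF'
$os = "ubuntu1804"
$network_plugin = "calico"
$subnet_ipv6 = "fd3c:b398:0698:0756"
EOF
vagrant up
```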
```diff
@@ -4,7 +4,7 @@
   become: no
   vars:
     minimal_ansible_version: 2.9.0
-    maximal_ansible_version: 2.10.0
+    maximal_ansible_version: 2.11.0
     ansible_connection: local
   tasks:
     - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
```
```diff
@@ -15,3 +15,18 @@
           - ansible_version.string is version(maximal_ansible_version, "<")
       tags:
         - check
+
+    - name: "Check that python netaddr is installed"
+      assert:
+        msg: "Python netaddr is not present"
+        that: "'127.0.0.1' | ipaddr"
+      tags:
+        - check
+
+    # CentOS 7 provides too old jinja version
+    - name: "Check that jinja is not too old (install via pip)"
+      assert:
+        msg: "Your Jinja version is too old, install via pip"
+        that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
+      tags:
+        - check
```
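These new preflight assertions fail fast when `netaddr` or a recent Jinja is missing on the control machine. A quick way to check and, if needed, satisfy them with Kubespray's own pins (a sketch):

```ShellSession
python3 -c 'import netaddr, jinja2; print(jinja2.__version__)'
# If the import fails or Jinja is older than 2.11:
python3 -m pip install -r requirements.txt
```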
cluster.yml (41 lines changed)
```diff
@@ -2,16 +2,21 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
 - hosts: bastion[0]
   gather_facts: False
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
```
```diff
@@ -20,9 +25,10 @@
     tags: always
   import_playbook: facts.yml
 
-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/preinstall, tags: preinstall }
```
```diff
@@ -32,6 +38,7 @@
 - hosts: etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - role: etcd
```
```diff
@@ -41,9 +48,10 @@
       etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
     when: not etcd_kubeadm_enabled| default(false)
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - role: etcd
```
```diff
@@ -53,49 +61,54 @@
       etcd_events_cluster_setup: false
     when: not etcd_kubeadm_enabled| default(false)
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes/master, tags: master }
+    - { role: kubernetes/control-plane, tags: master }
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/kubeadm, tags: kubeadm}
     - { role: network_plugin, tags: network }
     - { role: kubernetes/node-label, tags: node-label }
 
-- hosts: calico-rr
+- hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
```
```diff
@@ -104,16 +117,18 @@
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps, tags: apps }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
```
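After the group renames in this playbook (kube-master to kube_control_plane, k8s-cluster to k8s_cluster, calico-rr to calico_rr), a matching inventory looks like the following sketch; hostnames and addresses are placeholders:

```ShellSession
cat > inventory/mycluster/inventory.ini << 'EOF'
[all]
node1 ansible_host=10.0.0.1
node2 ansible_host=10.0.0.2
node3 ansible_host=10.0.0.3

[kube_control_plane]
node1

[etcd]
node1

[kube_node]
node2
node3

[k8s_cluster:children]
kube_control_plane
kube_node
EOF
```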
```diff
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }
 
     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube-master", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube_node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
         hosts[group].append(dns_name)
         hosts['_meta']['hostvars'][dns_name] = ansible_host
 
-    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))
 
 SearchEC2Tags()
```
```diff
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}
 
-[kube-node]
+[kube_node]
 {% for vm in vm_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
 
-[k8s-cluster:children]
-kube-node
-kube-master
+[k8s_cluster:children]
+kube_node
+kube_control_plane
```
```diff
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_roles_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}
 
-[kube-node]
+[kube_node]
 {% for vm in vm_roles_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
 
-[k8s-cluster:children]
-kube-node
-kube-master
+[k8s_cluster:children]
+kube_node
+kube_control_plane
 
```
```diff
@@ -144,7 +144,7 @@
           "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
         ],
         "tags": {
-          "roles": "kube-master,etcd"
+          "roles": "kube_control_plane,etcd"
         },
         "apiVersion": "{{apiVersion}}",
         "properties": {
```
```diff
@@ -61,7 +61,7 @@
           "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
         ],
         "tags": {
-          "roles": "kube-node"
+          "roles": "kube_node"
        },
         "apiVersion": "{{apiVersion}}",
         "properties": {
```
```diff
@@ -46,7 +46,7 @@ test_distro() {
   pass_or_fail "$prefix: netcheck" || return 1
 }
 
-NODES=($(egrep ^kube-node hosts))
+NODES=($(egrep ^kube_node hosts))
 NETCHECKER_HOST=localhost
 
 : ${OUTPUT_DIR:=./out}
```
```diff
@@ -44,8 +44,8 @@ import re
 import subprocess
 import sys
 
-ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
-         'calico-rr']
+ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+         'calico_rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                       'load']
@@ -63,7 +63,9 @@ def get_var_as_bool(name, default):
 
 
 CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
-KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS", 2))
+# Remove the reference of KUBE_MASTERS after some deprecation cycles.
+KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
+                                        os.environ.get("KUBE_MASTERS", 2)))
 # Reconfigures cluster distribution at scale
 SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
 MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
@@ -102,10 +104,11 @@ class KubesprayInventory(object):
             etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
             self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
             if len(self.hosts) >= SCALE_THRESHOLD:
-                self.set_kube_master(list(self.hosts.keys())[
-                    etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
+                self.set_kube_control_plane(list(self.hosts.keys())[
+                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
             else:
-                self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
+                self.set_kube_control_plane(
+                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
             self.set_kube_node(self.hosts.keys())
             if len(self.hosts) >= SCALE_THRESHOLD:
                 self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
@@ -266,7 +269,7 @@ class KubesprayInventory(object):
 
     def purge_invalid_hosts(self, hostnames, protected_names=[]):
         for role in self.yaml_config['all']['children']:
-            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                 all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                 for host in all_hosts.keys():
                     if host not in hostnames and host not in protected_names:
@@ -287,52 +290,54 @@ class KubesprayInventory(object):
             if self.yaml_config['all']['hosts'] is None:
                 self.yaml_config['all']['hosts'] = {host: None}
             self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s-cluster:children':
+        elif group != 'k8s_cluster:children':
             if self.yaml_config['all']['children'][group]['hosts'] is None:
                 self.yaml_config['all']['children'][group]['hosts'] = {
                     host: None}
             else:
                 self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa
 
-    def set_kube_master(self, hosts):
+    def set_kube_control_plane(self, hosts):
         for host in hosts:
-            self.add_host_to_group('kube-master', host)
+            self.add_host_to_group('kube_control_plane', host)
 
     def set_all(self, hosts):
         for host, opts in hosts.items():
             self.add_host_to_group('all', host, opts)
 
     def set_k8s_cluster(self):
-        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
-        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
+        k8s_cluster = {'children': {'kube_control_plane': None,
+                                    'kube_node': None}}
+        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
 
     def set_calico_rr(self, hosts):
         for host in hosts:
-            if host in self.yaml_config['all']['children']['kube-master']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-master group".format(host))
+            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_control_plane "
+                           "group".format(host))
                 continue
-            if host in self.yaml_config['all']['children']['kube-node']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-node group".format(host))
+            if host in self.yaml_config['all']['children']['kube_node']:
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_node group".format(host))
                 continue
-            self.add_host_to_group('calico-rr', host)
+            self.add_host_to_group('calico_rr', host)
 
     def set_kube_node(self, hosts):
         for host in hosts:
             if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                 if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
-                               "scale deployment and host is in kube-master "
-                               "group.".format(host))
+                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
+                    self.debug("Not adding {0} to kube_node group because of "
+                               "scale deployment and host is in "
+                               "kube_control_plane group.".format(host))
                     continue
-            self.add_host_to_group('kube-node', host)
+            self.add_host_to_group('kube_node', host)
 
     def set_etcd(self, hosts):
         for host in hosts:
@@ -402,9 +407,9 @@ Configurable env vars:
 DEBUG                   Enable debug printing. Default: True
 CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.yaml
 HOST_PREFIX             Host prefix for generated hosts. Default: node
-KUBE_MASTERS            Set the number of kube-masters. Default: 2
+KUBE_CONTROL_HOSTS      Set the number of kube-control-planes. Default: 2
 SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
-MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
+MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
 '''  # noqa
     print(help_text)
 
```
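With the rename in place, a run of the builder might look like this sketch (the IPs are placeholders; per the comment in the diff, the old `KUBE_MASTERS` variable keeps working during the deprecation window):

```ShellSession
declare -a IPS=(10.0.0.1 10.0.0.2 10.0.0.3)
CONFIG_FILE=inventory/mycluster/hosts.yaml KUBE_CONTROL_HOSTS=2 \
  python3 contrib/inventory_builder/inventory.py ${IPS[@]}
```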
```diff
@@ -13,8 +13,8 @@
 # under the License.
 
 import inventory
-import mock
 import unittest
+from unittest import mock
 
 from collections import OrderedDict
 import sys
@@ -222,11 +222,11 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
             None)
 
-    def test_set_kube_master(self):
-        group = 'kube-master'
+    def test_set_kube_control_plane(self):
+        group = 'kube_control_plane'
         host = 'node1'
 
-        self.inv.set_kube_master([host])
+        self.inv.set_kube_control_plane([host])
         self.assertIn(
             host, self.inv.yaml_config['all']['children'][group]['hosts'])
 
@@ -241,8 +241,8 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['hosts'].get(host), opt)
 
     def test_set_k8s_cluster(self):
-        group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube-master']
+        group = 'k8s_cluster'
+        expected_hosts = ['kube_node', 'kube_control_plane']
 
         self.inv.set_k8s_cluster()
         for host in expected_hosts:
@@ -251,7 +251,7 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['children'][group]['children'])
 
     def test_set_kube_node(self):
-        group = 'kube-node'
+        group = 'kube_node'
         host = 'node1'
 
         self.inv.set_kube_node([host])
@@ -275,12 +275,12 @@ class TestInventory(unittest.TestCase):
 
         self.inv.set_all(hosts)
         self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_master(list(hosts.keys())[0:2])
+        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
         self.inv.set_kube_node(hosts.keys())
         for h in range(3):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
 
     def test_scale_scenario_two(self):
         num_nodes = 500
@@ -291,12 +291,12 @@ class TestInventory(unittest.TestCase):
 
         self.inv.set_all(hosts)
         self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_master(list(hosts.keys())[3:5])
+        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
         self.inv.set_kube_node(hosts.keys())
         for h in range(5):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
 
     def test_range2ips_range(self):
         changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
```
```diff
@@ -15,10 +15,10 @@
   roles:
     - { role: glusterfs/server }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
```
```diff
@@ -14,7 +14,7 @@
 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
 # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
 
-# [kube-master]
+# [kube_control_plane]
 # node1
 # node2
 
@@ -23,16 +23,16 @@
 # node2
 # node3
 
-# [kube-node]
+# [kube_node]
 # node2
 # node3
 # node4
 # node5
 # node6
 
-# [k8s-cluster:children]
-# kube-node
-# kube-master
+# [k8s_cluster:children]
+# kube_node
+# kube_control_plane
 
 # [gfs-cluster]
 # gfs_node1
```
```diff
@@ -8,7 +8,7 @@ Installs and configures GlusterFS on Linux.
 
 For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
 
-This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
+This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
 
 ## Role Variables
```
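As an illustration of the port requirements quoted in the context line above, opening them with firewalld directly (an alternative to the `geerlingguy.firewall` role the README suggests; the size of the 49152+ range is an assumption, one port per brick):

```ShellSession
firewall-cmd --permanent --add-port=24007-24009/tcp
firewall-cmd --permanent --add-port=49152-49156/tcp
firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp
firewall-cmd --reload
```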
```diff
@@ -8,7 +8,7 @@
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
     - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
 - name: Kubernetes Apps | Set GlusterFS endpoint and PV
   kube:
@@ -19,4 +19,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
```
```diff
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: tear-down }
 
```
```diff
@@ -3,7 +3,7 @@
   roles:
     - { role: prepare }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tags:
     - "provision"
   roles:
```
```diff
@@ -3,17 +3,17 @@ all:
     heketi_admin_key: "11elfeinhundertundelf"
     heketi_user_key: "!!einseinseins"
   children:
-    k8s-cluster:
+    k8s_cluster:
       vars:
         kubelet_fail_swap_on: false
       children:
-        kube-master:
+        kube_control_plane:
           hosts:
             node1:
         etcd:
          hosts:
             node2:
-        kube-node:
+        kube_node:
           hosts: &kube_nodes
             node1:
             node2:
```
````diff
@@ -1,4 +1,8 @@
-# Container image collecting script for offline deployment
+# Offline deployment
+
+## manage-offline-container-images.sh
+
+Container image collecting script for offline deployment
 
 This script has two features:
 (1) Get container images from an environment which is deployed online.
@@ -19,3 +23,21 @@ Step(2) can be operated with:
 ```shell
 manage-offline-container-images.sh register
 ```
+
+## generate_list.sh
+
+This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.
+
+Run this script will generates three files, all downloaded files url in files.list, all container images in images.list, all component version in generate.sh.
+
+```shell
+bash generate_list.sh
+tree temp
+temp
+├── files.list
+├── generate.sh
+└── images.list
+0 directories, 3 files
+```
+
+In some cases you may want to update some component version, you can edit `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update file.list or run `bash generate.sh | grep -v 'https'> images.list` to update images.list.
````
contrib/offline/generate_list.sh (new file, 57 lines)
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||
|
||||
: ${IMAGE_ARCH:="amd64"}
|
||||
: ${ANSIBLE_SYSTEM:="linux"}
|
||||
: ${ANSIBLE_ARCHITECTURE:="x86_64"}
|
||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
||||
: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}
|
||||
|
||||
mkdir -p ${TEMP_DIR}
|
||||
|
||||
# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
|
||||
if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi
|
||||
|
||||
cat > ${TEMP_DIR}/generate.sh << EOF
|
||||
arch=${ARCH}
|
||||
image_arch=${IMAGE_ARCH}
|
||||
ansible_system=${ANSIBLE_SYSTEM}
|
||||
ansible_architecture=${ANSIBLE_ARCHITECTURE}
|
||||
EOF
|
||||
|
||||
# generate all component version by $DOWNLOAD_YML
|
||||
grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
|
||||
| sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
|
||||
grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
|
||||
sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
|
||||
sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh
|
||||
|
||||
# generate all download files url
|
||||
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh
|
||||
|
||||
# generate all images list
|
||||
grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
|
||||
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
|
||||
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
|
||||
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh
|
||||
|
||||
# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
|
||||
sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
|
||||
sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh
|
||||
|
||||
# add kube-* images to images list
|
||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||
echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
|
||||
echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh
|
||||
|
||||
# print files.list and images.list
|
||||
bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
|
||||
bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list
|
||||
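One plausible way to consume the generated `files.list` on the connected host, using only standard wget flags (a sketch):

```ShellSession
wget --input-file temp/files.list --force-directories --directory-prefix temp/files
```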
@@ -100,15 +100,35 @@ function register_container_images() {
tar -zxvf ${IMAGE_TAR_FILE}
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
set +e

sudo docker container inspect registry >/dev/null 2>&1
if [ $? -ne 0 ]; then
    sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
fi
set -e

while read -r line; do
    file_name=$(echo ${line} | awk '{print $1}')
    org_image=$(echo ${line} | awk '{print $2}')
    new_image="${LOCALHOST_NAME}:5000/${org_image}"
    image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
    raw_image=$(echo ${line} | awk '{print $2}')
    new_image="${LOCALHOST_NAME}:5000/${raw_image}"
    org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
    image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
    if [ -z "${file_name}" ]; then
        echo "Failed to get file_name for line ${line}"
        exit 1
    fi
    if [ -z "${raw_image}" ]; then
        echo "Failed to get raw_image for line ${line}"
        exit 1
    fi
    if [ -z "${org_image}" ]; then
        echo "Failed to get org_image for line ${line}"
        exit 1
    fi
    if [ -z "${image_id}" ]; then
        echo "Failed to get image_id for file ${file_name}"
        exit 1
    fi
    sudo docker load -i ${IMAGE_DIR}/${file_name}
    sudo docker tag ${image_id} ${new_image}
    sudo docker push ${new_image}
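The rewritten loop derives `org_image` from the output of `docker load` itself rather than from the tarball contents. This works because `docker load` prints a `Loaded image:` line; a sketch of the parsing, where `nginx.tar` is a hypothetical image archive:

```bash
$ sudo docker load -i nginx.tar
Loaded image: docker.io/library/nginx:1.19
# head -n1 keeps the first line; awk '{print $3}' takes the third
# whitespace-separated field, i.e. the image reference itself
$ sudo docker load -i nginx.tar | head -n1 | awk '{print $3}'
docker.io/library/nginx:1.19
```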
contrib/os-services/os-services.yml (new file)
@@ -0,0 +1,4 @@
---
- hosts: all
  roles:
    - { role: prepare }
contrib/os-services/roles/prepare/defaults/main.yml (new file)
@@ -0,0 +1,2 @@
---
disable_service_firewall: false
contrib/os-services/roles/prepare/tasks/main.yml (new file)
@@ -0,0 +1,23 @@
---
- block:
    - name: List services
      service_facts:

    - name: Disable service firewalld
      systemd:
        name: firewalld
        state: stopped
        enabled: no
      when:
        "'firewalld.service' in services"

    - name: Disable service ufw
      systemd:
        name: ufw
        state: stopped
        enabled: no
      when:
        "'ufw.service' in services"

  when:
    - disable_service_firewall
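A minimal invocation of this playbook might look as follows (a sketch assuming a standard inventory path; the explicit `-e` is needed because `disable_service_firewall` defaults to `false`):

```bash
ansible-playbook -i inventory/mycluster/hosts.yaml \
  contrib/os-services/os-services.yml -b \
  -e disable_service_firewall=true
```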
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/

@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n
```commandline
# Get the controller's IP address.
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)

# Get the hostname of the load balancer.
@@ -61,11 +61,11 @@ resource "aws_instance" "bastion-server" {

  key_name = var.AWS_SSH_KEY_NAME

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
    "Cluster", var.aws_cluster_name,
    "Role", "bastion-${var.aws_cluster_name}-${count.index}"
  ))
  tags = merge(var.default_tags, tomap({
    Name    = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
    Cluster = var.aws_cluster_name
    Role    = "bastion-${var.aws_cluster_name}-${count.index}"
  }))
}

/*
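Every tag change in this diff is the same mechanical migration: the `map(...)` function was deprecated in Terraform 0.12 and later removed, so its pairwise argument lists become `tomap({...})` object literals. The equivalence is easy to verify interactively (a sketch using `terraform console`; exact output formatting may vary by Terraform version):

```bash
$ terraform console
> tomap({ Name = "kubernetes-demo-bastion-0", Cluster = "demo" })
tomap({
  "Cluster" = "demo"
  "Name" = "kubernetes-demo-bastion-0"
})
```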
@@ -84,14 +84,14 @@ resource "aws_instance" "k8s-master" {

  vpc_security_group_ids = module.aws-vpc.aws_security_group

  iam_instance_profile = module.aws-iam.kube-master-profile
  iam_instance_profile = module.aws-iam.kube_control_plane-profile
  key_name             = var.AWS_SSH_KEY_NAME

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "master"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
    Role = "master"
  }))
}

resource "aws_elb_attachment" "attach_master_nodes" {
@@ -113,11 +113,11 @@ resource "aws_instance" "k8s-etcd" {

  key_name = var.AWS_SSH_KEY_NAME

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "etcd"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
    Role = "etcd"
  }))
}

resource "aws_instance" "k8s-worker" {
@@ -134,11 +134,11 @@ resource "aws_instance" "k8s-worker" {
  iam_instance_profile = module.aws-iam.kube-worker-profile
  key_name             = var.AWS_SSH_KEY_NAME

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "worker"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
    Role = "worker"
  }))
}

/*

@@ -2,9 +2,9 @@ resource "aws_security_group" "aws-elb" {
  name   = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  vpc_id = var.aws_vpc_id

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  }))
}

resource "aws_security_group_rule" "aws-allow-api-access" {
@@ -51,7 +51,7 @@ resource "aws_elb" "aws-elb-api" {
  connection_draining         = true
  connection_draining_timeout = 400

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-elb-api"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
  }))
}

@@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes

resource "aws_iam_role" "kube-master" {
resource "aws_iam_role" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"

  assume_role_policy = <<EOF
@@ -40,9 +40,9 @@ EOF

#Add AWS Policies for Kubernetes

resource "aws_iam_role_policy" "kube-master" {
resource "aws_iam_role_policy" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  role = aws_iam_role.kube-master.id
  role = aws_iam_role.kube_control_plane.id

  policy = <<EOF
{
@@ -130,9 +130,9 @@ EOF

#Create AWS Instance Profiles

resource "aws_iam_instance_profile" "kube-master" {
resource "aws_iam_instance_profile" "kube_control_plane" {
  name = "kube_${var.aws_cluster_name}_master_profile"
  role = aws_iam_role.kube-master.name
  role = aws_iam_role.kube_control_plane.name
}

resource "aws_iam_instance_profile" "kube-worker" {

@@ -1,5 +1,5 @@
output "kube-master-profile" {
  value = aws_iam_instance_profile.kube-master.name
output "kube_control_plane-profile" {
  value = aws_iam_instance_profile.kube_control_plane.name
}

output "kube-worker-profile" {

@@ -5,9 +5,9 @@ resource "aws_vpc" "cluster-vpc" {
  enable_dns_support   = true
  enable_dns_hostnames = true

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-vpc"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-vpc"
  }))
}

resource "aws_eip" "cluster-nat-eip" {
@@ -18,9 +18,9 @@ resource "aws_eip" "cluster-nat-eip" {
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
  vpc_id = aws_vpc.cluster-vpc.id

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-internetgw"
  }))
}

resource "aws_subnet" "cluster-vpc-subnets-public" {
@@ -29,10 +29,10 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
  availability_zone = element(var.aws_avail_zones, count.index)
  cidr_block        = element(var.aws_cidr_subnets_public, count.index)

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
  }))
}

resource "aws_nat_gateway" "cluster-nat-gateway" {
@@ -47,9 +47,9 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
  availability_zone = element(var.aws_avail_zones, count.index)
  cidr_block        = element(var.aws_cidr_subnets_private, count.index)

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
  }))
}

#Routing in VPC
@@ -64,9 +64,9 @@ resource "aws_route_table" "kubernetes-public" {
    gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
  }

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
  }))
}

resource "aws_route_table" "kubernetes-private" {
@@ -78,9 +78,9 @@ resource "aws_route_table" "kubernetes-private" {
    nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
  }

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
  }))
}

resource "aws_route_table_association" "kubernetes-public" {
@@ -101,9 +101,9 @@ resource "aws_security_group" "kubernetes" {
  name   = "kubernetes-${var.aws_cluster_name}-securitygroup"
  vpc_id = aws_vpc.cluster-vpc.id

  tags = merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
  ))
  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
  }))
}

resource "aws_security_group_rule" "allow-all-ingress" {

@@ -7,11 +7,11 @@ ${public_ip_address_bastion}
[bastion]
${public_ip_address_bastion}

[kube-master]
[kube_control_plane]
${list_master}


[kube-node]
[kube_node]
${list_node}

@@ -19,10 +19,10 @@ ${list_node}
${list_etcd}


[k8s-cluster:children]
kube-node
kube-master
[k8s_cluster:children]
kube_node
kube_control_plane


[k8s-cluster:vars]
[k8s_cluster:vars]
${elb_api_fqdn}

contrib/terraform/exoscale/README.md (new file)
@@ -0,0 +1,154 @@
# Kubernetes on Exoscale with Terraform

Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
                                  Kubernetes cluster
                            +------------------------+
+---------------+           |  +--------------+      |
|               |           |  |  +--------------+   |
| API server LB +------------->|  |              |   |
|               |           |  |  | Master/etcd  |   |
+---------------+           |  |  | node(s)      |   |
                            |  +--+              |   |
                            |     +--------------+   |
                            |            ^           |
                            |            |           |
                            |            v           |
+---------------+           |  +--------------+      |
|               |           |  |  +--------------+   |
| Ingress LB    +------------->|  |              |   |
|               |           |  |  | Worker       |   |
+---------------+           |  |  | node(s)      |   |
                            |  +--+              |   |
                            |     +--------------+   |
                            +------------------------+
```

## Requirements

* Terraform 0.13.0 or newer

*0.12 also works if you modify the provider block to include the version and remove all `versions.tf` files.*

## Quickstart

NOTE: *Assumes you are at the root of the kubespray repo.*

Copy the sample inventory for your cluster and copy the default terraform variables:

```bash
CLUSTER=my-exoscale-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.

```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```

For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
The file should look something like this:

```ini
[cloudstack]
key = <API key>
secret = <API secret>
```

Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.

### Encrypted credentials

To keep the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and decrypt them only at runtime:

```bash
cat << EOF > cloudstack.ini
[cloudstack]
key =
secret =
EOF
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
sops cloudstack.ini
```

Run terraform to create the infrastructure:

```bash
terraform init ../../contrib/terraform/exoscale
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
```

If your cloudstack credentials file is encrypted using sops, run the following instead:

```bash
terraform init ../../contrib/terraform/exoscale
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray.
You can copy this inventory file and use it with kubespray to set up a cluster.
Run `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane load-balancers.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by running:

```bash
ansible -i inventory.ini -m ping all
```

Example of using this with the default sample inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Teardown

The Kubernetes cluster cannot create any load-balancers or disks, so teardown is as simple as running `terraform destroy`:

```bash
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
```

## Variables

### Required

* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. The key of each entry is used as the machine name
  * `node_type`: The role of this node *(master|worker)*
  * `size`: The size to use
  * `boot_disk`: The boot disk to use
    * `image_name`: Name of the image
    * `root_partition_size`: Size *(in GB)* for the root partition
    * `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. *(Set to 0 to disable)*
    * `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. *(Set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*

An example variables file can be found in `default.tfvars`.

## Known limitations

### Only single disk

Since Exoscale doesn't support mounting additional disks onto an instance, this script can instead create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).

### No Kubernetes API

The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) load balancer in front of all workers and run the Ingress controller in DaemonSet mode.
contrib/terraform/exoscale/default.tfvars (new file)
@@ -0,0 +1,65 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Medium",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
contrib/terraform/exoscale/main.tf (new file)
@@ -0,0 +1,49 @@
provider "exoscale" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys

  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
      values(module.kubernetes.master_ip_addresses).*.private_ip,
      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
      keys(module.kubernetes.worker_ip_addresses),
      values(module.kubernetes.worker_ip_addresses).*.public_ip,
      values(module.kubernetes.worker_ip_addresses).*.private_ip))

    list_master       = join("\n", keys(module.kubernetes.master_ip_addresses))
    list_worker       = join("\n", keys(module.kubernetes.worker_ip_addresses))
    api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}
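For a hypothetical machine `default-master-0` with public IP 203.0.113.10 and private IP 172.0.10.10, the `formatlist` calls above render inventory lines roughly like these (illustrative values only):

```bash
$ head -n 3 inventory.ini
[all]
default-master-0 ansible_user=ubuntu ansible_host=203.0.113.10 ip=172.0.10.10 etcd_member_name=etcd1
default-worker-0 ansible_user=ubuntu ansible_host=203.0.113.11 ip=172.0.10.11
```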
contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf (new file)
@@ -0,0 +1,193 @@
data "exoscale_compute_template" "os_image" {
|
||||
for_each = var.machines
|
||||
|
||||
zone = var.zone
|
||||
name = each.value.boot_disk.image_name
|
||||
}
|
||||
|
||||
data "exoscale_compute" "master_nodes" {
|
||||
for_each = exoscale_compute.master
|
||||
|
||||
id = each.value.id
|
||||
|
||||
# Since private IP address is not assigned until the nics are created we need this
|
||||
depends_on = [exoscale_nic.master_private_network_nic]
|
||||
}
|
||||
|
||||
data "exoscale_compute" "worker_nodes" {
|
||||
for_each = exoscale_compute.worker
|
||||
|
||||
id = each.value.id
|
||||
|
||||
# Since private IP address is not assigned until the nics are created we need this
|
||||
depends_on = [exoscale_nic.worker_private_network_nic]
|
||||
}
|
||||
|
||||
resource "exoscale_network" "private_network" {
|
||||
zone = var.zone
|
||||
name = "${var.prefix}-network"
|
||||
|
||||
start_ip = cidrhost(var.private_network_cidr, 1)
|
||||
# cidr -1 = Broadcast address
|
||||
# cidr -2 = DHCP server address (exoscale specific)
|
||||
end_ip = cidrhost(var.private_network_cidr, -3)
|
||||
netmask = cidrnetmask(var.private_network_cidr)
|
||||
}
|
||||
|
||||
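For the default `private_network_cidr` of 172.0.10.0/24, the `cidrhost`/`cidrnetmask` calls above resolve as follows; negative indices count back from the broadcast address (a sketch using `terraform console`):

```bash
$ terraform console
> cidrhost("172.0.10.0/24", 1)
"172.0.10.1"
> cidrhost("172.0.10.0/24", -3)
"172.0.10.253"
> cidrnetmask("172.0.10.0/24")
"255.255.255.0"
```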
resource "exoscale_compute" "master" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "master"
|
||||
}
|
||||
|
||||
display_name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.master_sg.name]
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
node_type = "master"
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "exoscale_compute" "worker" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "worker"
|
||||
}
|
||||
|
||||
display_name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.worker_sg.name]
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
node_type = "worker"
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "exoscale_nic" "master_private_network_nic" {
|
||||
for_each = exoscale_compute.master
|
||||
|
||||
compute_id = each.value.id
|
||||
network_id = exoscale_network.private_network.id
|
||||
}
|
||||
|
||||
resource "exoscale_nic" "worker_private_network_nic" {
|
||||
for_each = exoscale_compute.worker
|
||||
|
||||
compute_id = each.value.id
|
||||
network_id = exoscale_network.private_network.id
|
||||
}
|
||||
|
||||
resource "exoscale_security_group" "master_sg" {
|
||||
name = "${var.prefix}-master-sg"
|
||||
description = "Security group for Kubernetes masters"
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rules" "master_sg_rules" {
|
||||
security_group_id = exoscale_security_group.master_sg.id
|
||||
|
||||
# SSH
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.ssh_whitelist
|
||||
ports = ["22"]
|
||||
}
|
||||
|
||||
# Kubernetes API
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.api_server_whitelist
|
||||
ports = ["6443"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "exoscale_security_group" "worker_sg" {
|
||||
name = "${var.prefix}-worker-sg"
|
||||
description = "security group for kubernetes worker nodes"
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rules" "worker_sg_rules" {
|
||||
security_group_id = exoscale_security_group.worker_sg.id
|
||||
|
||||
# SSH
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.ssh_whitelist
|
||||
ports = ["22"]
|
||||
}
|
||||
|
||||
# HTTP(S)
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = ["0.0.0.0/0"]
|
||||
ports = ["80", "443"]
|
||||
}
|
||||
|
||||
# Kubernetes Nodeport
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.nodeport_whitelist
|
||||
ports = ["30000-32767"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "exoscale_ipaddress" "ingress_controller_lb" {
|
||||
zone = var.zone
|
||||
healthcheck_mode = "http"
|
||||
healthcheck_port = 80
|
||||
healthcheck_path = "/healthz"
|
||||
healthcheck_interval = 10
|
||||
healthcheck_timeout = 2
|
||||
healthcheck_strikes_ok = 2
|
||||
healthcheck_strikes_fail = 3
|
||||
}
|
||||
|
||||
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
|
||||
for_each = exoscale_compute.worker
|
||||
|
||||
compute_id = each.value.id
|
||||
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
}
|
||||
|
||||
resource "exoscale_ipaddress" "control_plane_lb" {
|
||||
zone = var.zone
|
||||
healthcheck_mode = "tcp"
|
||||
healthcheck_port = 6443
|
||||
healthcheck_interval = 10
|
||||
healthcheck_timeout = 2
|
||||
healthcheck_strikes_ok = 2
|
||||
healthcheck_strikes_fail = 3
|
||||
}
|
||||
|
||||
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
|
||||
for_each = exoscale_compute.master
|
||||
|
||||
compute_id = each.value.id
|
||||
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
|
||||
}
|
||||
@@ -0,0 +1,31 @@
output "master_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.master :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.master[key].ip_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.worker :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.worker[key].ip_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_network_cidr
}

output "ingress_controller_lb_ip_address" {
  value = exoscale_ipaddress.ingress_controller_lb.ip_address
}

output "control_plane_lb_ip_address" {
  value = exoscale_ipaddress.control_plane_lb.ip_address
}
@@ -0,0 +1,52 @@
#cloud-config
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
  - [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
  # Create partition for node local storage
  - [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
  - [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
  # Create partition for rook to use for ceph
  - [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

write_files:
  - path: /etc/netplan/eth1.yaml
    content: |
      network:
        version: 2
        ethernets:
          eth1:
            dhcp4: true
%{ if node_type == "worker" }
  # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
  # pool it no longer can send traffic back to itself via the EIP IP
  # address.
  # Remove this if it ever gets solved.
  - path: /etc/netplan/20-eip-fix.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - ${eip_ip_address}/32
%{ endif }
runcmd:
  - netplan apply
%{ if node_local_partition_size > 0 }
  - mkdir -p /mnt/disks/node-local-storage
  - chown nobody:nogroup /mnt/disks/node-local-storage
  - mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }
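To see what the partitioning template expands to, take a hypothetical worker with `root_partition_size = 50`, `node_local_partition_size = 100`, and `ceph_partition_size = 100`; the rendered bootcmd then boils down to roughly the following (a sketch, not verbatim cloud-init output):

```bash
sgdisk --move-second-header /dev/vda
# node-local partition: from the end of the root partition (50GB)
# to root + node-local (150GB)
parted --script /dev/vda 'mkpart extended ext4 50GB 150GB'
mkfs.ext4 /dev/vda2
# ceph partition: from 150GB to the end of the disk (-1)
parted --script /dev/vda 'mkpart extended 150GB -1'
```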
@@ -0,0 +1,42 @@
variable "zone" {
  type = string
  # This is currently the only zone that is supposed to support
  # so called "managed private networks".
  # See: https://www.exoscale.com/syslog/introducing-managed-private-networks
  default = "ch-gva-2"
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    boot_disk = object({
      image_name                = string
      root_partition_size      = number
      ceph_partition_size      = number
      node_local_partition_size = number
    })
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "172.0.10.0/24"
}
@@ -0,0 +1,9 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
  }
  required_version = ">= 0.13"
}
contrib/terraform/exoscale/output.tf (new file)
@@ -0,0 +1,15 @@
output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}

output "ingress_controller_lb_ip_address" {
  value = module.kubernetes.ingress_controller_lb_ip_address
}

output "control_plane_lb_ip_address" {
  value = module.kubernetes.control_plane_lb_ip_address
}
contrib/terraform/exoscale/sample-inventory/cluster.tfvars (new file)
@@ -0,0 +1,65 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Small",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
contrib/terraform/exoscale/sample-inventory/group_vars (symbolic link)
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars
contrib/terraform/exoscale/templates/inventory.tpl (new file)
@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
${list_master}

[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

[etcd]
${list_master}

[kube_node]
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node
contrib/terraform/exoscale/variables.tf (new file)
@@ -0,0 +1,46 @@
variable "zone" {
|
||||
description = "The zone where to run the cluster"
|
||||
}
|
||||
|
||||
variable "prefix" {
|
||||
description = "Prefix for resource names"
|
||||
default = "default"
|
||||
}
|
||||
|
||||
variable "machines" {
|
||||
description = "Cluster machines"
|
||||
type = map(object({
|
||||
node_type = string
|
||||
size = string
|
||||
boot_disk = object({
|
||||
image_name = string
|
||||
root_partition_size = number
|
||||
ceph_partition_size = number
|
||||
node_local_partition_size = number
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
variable "ssh_public_keys" {
|
||||
description = "List of public SSH keys which are injected into the VMs."
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for ssh"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "api_server_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "nodeport_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "inventory_file" {
|
||||
description = "Where to store the generated inventory file"
|
||||
}
|
||||
contrib/terraform/exoscale/versions.tf (new file)
@@ -0,0 +1,15 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}
@@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
done

echo ""
echo "[kube-master]"
echo "[kube_control_plane]"
for name in "${MASTER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[kube-master:vars]"
echo "[kube_control_plane:vars]"
echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
echo ""
echo "[etcd]"
@@ -65,12 +65,12 @@ for name in "${MASTER_NAMES[@]}"; do
done

echo ""
echo "[kube-node]"
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[k8s-cluster:children]"
echo "kube-master"
echo "kube-node"
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"

@@ -263,12 +263,13 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
|`gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
@@ -281,7 +282,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

##### k8s_nodes

Allows a custom defintion of worker nodes giving the operator full control over individual node flavor and
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable.
@@ -420,7 +421,7 @@ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
```

if you chose to create a bastion host, this script will create
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
be able to access your machines tunneling through the bastion's IP address. If
you want to manually handle the ssh tunneling to these machines, please delete
or move that file. If you want to use this, just leave it there, as ansible will
@@ -545,7 +546,7 @@ bin_dir: /opt/bin
cloud_provider: openstack
```

Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:

- Set variable **kube_network_plugin** to your desired networking plugin.
  - **flannel** works out-of-the-box

@@ -52,7 +52,11 @@ module "compute" {
  master_volume_type = var.master_volume_type
  public_key_path    = var.public_key_path
  image              = var.image
  image_uuid         = var.image_uuid
  image_gfs          = var.image_gfs
  image_master       = var.image_master
  image_master_uuid  = var.image_master_uuid
  image_gfs_uuid     = var.image_gfs_uuid
  ssh_user           = var.ssh_user
  ssh_user_gfs       = var.ssh_user_gfs
  flavor_k8s_master  = var.flavor_k8s_master

@@ -1,11 +1,20 @@
data "openstack_images_image_v2" "vm_image" {
  count       = var.image_uuid == "" ? 1 : 0
  most_recent = true
  name        = var.image
}

data "openstack_images_image_v2" "gfs_image" {
  count       = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
  most_recent = true
  name        = var.image_gfs == "" ? var.image : var.image_gfs
}

data "openstack_images_image_v2" "image_master" {
  count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
  name  = var.image_master == "" ? var.image : var.image_master
}

resource "openstack_compute_keypair_v2" "k8s" {
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))
@@ -149,21 +158,28 @@ locals {
  worker_sec_groups = compact([
    openstack_networking_secgroup_v2.k8s.name,
    openstack_networking_secgroup_v2.worker.name,
    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
  ])

  # Image uuid
  image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
  # Image_gfs uuid
  image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
  # Image_master uuid
  image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
}

resource "openstack_compute_instance_v2" "bastion" {
  name       = "${var.cluster_name}-bastion-${count.index + 1}"
  count      = var.number_of_bastions
  image_name = var.image
  image_id   = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id  = var.flavor_bastion
  key_pair   = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_node
      source_type = "image"
      volume_size = var.bastion_root_volume_size_in_gb
      boot_index = 0
@@ -188,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
  }
}

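The `local-exec` above is plain string substitution over the bastion template; run by hand with a hypothetical user and bastion floating IP it looks like this:

```bash
sed s/USER/ubuntu/ ../../contrib/terraform/openstack/ansible_bastion_template.txt \
  | sed s/BASTION_ADDRESS/198.51.100.10/ > group_vars/no_floating.yml
```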
@@ -196,15 +212,15 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  name              = "${var.cluster_name}-k8s-master-${count.index + 1}"
  count             = var.number_of_k8s_masters
  availability_zone = element(var.az_list, count.index)
  image_name        = var.image
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type
@@ -229,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
  }
}

@@ -243,15 +259,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  name              = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_etcd
  availability_zone = element(var.az_list, count.index)
  image_name        = var.image
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type
@@ -276,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
  }
}

@@ -290,14 +306,14 @@ resource "openstack_compute_instance_v2" "etcd" {
  name              = "${var.cluster_name}-etcd-${count.index + 1}"
  count             = var.number_of_etcd
  availability_zone = element(var.az_list, count.index)
  image_name        = var.image
  image_id          = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_etcd
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.etcd_root_volume_size_in_gb
      boot_index = 0
@@ -321,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "etcd,vault,no-floating"
    kubespray_groups = "etcd,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -331,14 +347,14 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
  name              = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_floating_ip
  availability_zone = element(var.az_list, count.index)
  image_name        = var.image
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type
@@ -363,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -373,14 +389,14 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  name              = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_floating_ip_no_etcd
  availability_zone = element(var.az_list, count.index)
  image_name        = var.image
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type
@@ -405,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -415,14 +431,14 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  name              = "${var.cluster_name}-k8s-node-${count.index + 1}"
  count             = var.number_of_k8s_nodes
  availability_zone = element(var.az_list_node, count.index)
  image_name        = var.image
  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id         = var.flavor_k8s_node
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
      uuid = data.openstack_images_image_v2.vm_image.id
      uuid = local.image_to_use_node
      source_type = "image"
      volume_size = var.node_root_volume_size_in_gb
      boot_index = 0
@@ -446,13 +462,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
  }
}

@@ -460,14 +476,14 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
availability_zone = element(var.az_list_node, count.index)
|
||||
image_name = var.image
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
content {
|
||||
uuid = data.openstack_images_image_v2.vm_image.id
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
boot_index = 0
|
||||
@@ -491,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
|
||||
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
@@ -501,14 +517,14 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
availability_zone = each.value.az
|
||||
image_name = var.image
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
content {
|
||||
uuid = data.openstack_images_image_v2.vm_image.id
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
boot_index = 0
|
||||
@@ -532,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
|
||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -546,14 +562,14 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
availability_zone = element(var.az_list, count.index)
|
||||
image_name = var.image_gfs
|
||||
image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
|
||||
flavor_id = var.flavor_gfs_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : []
|
||||
content {
|
||||
uuid = data.openstack_images_image_v2.vm_image.id
|
||||
uuid = local.image_to_use_gfs
|
||||
source_type = "image"
|
||||
volume_size = var.gfs_root_volume_size_in_gb
|
||||
boot_index = 0
|
||||
@@ -577,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user_gfs
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no_floating"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
@@ -135,3 +135,19 @@ variable "extra_sec_groups" {
 variable "extra_sec_groups_name" {
   type = string
 }
+
+variable "image_uuid" {
+  type = string
+}
+
+variable "image_gfs_uuid" {
+  type = string
+}
+
+variable "image_master" {
+  type = string
+}
+
+variable "image_master_uuid" {
+  type = string
+}

@@ -177,12 +177,12 @@ variable "external_net" {
 }

 variable "supplementary_master_groups" {
-  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  description = "supplementary kubespray ansible groups for masters, such kube_node"
   default     = ""
 }

 variable "supplementary_node_groups" {
-  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress"
   default     = ""
 }

@@ -258,3 +258,23 @@ variable "extra_sec_groups" {
 variable "extra_sec_groups_name" {
   default = "custom"
 }
+
+variable "image_uuid" {
+  description = "uuid of image inside openstack to use"
+  default     = ""
+}
+
+variable "image_gfs_uuid" {
+  description = "uuid of image to be used on gluster fs nodes. If empty defaults to image_uuid"
+  default     = ""
+}
+
+variable "image_master" {
+  description = "uuid of image inside openstack to use"
+  default     = ""
+}
+
+variable "image_master_uuid" {
+  description = "uuid of image to be used on master nodes. If empty defaults to image_uuid"
+  default     = ""
+}

@@ -108,7 +108,7 @@ While the defaults in variables.tf will successfully deploy a cluster, it is rec
 Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
 `kubeconfig_localhost: true` in the Kubespray configuration.

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`:
 `# kubeconfig_localhost: false`
 becomes:
 `kubeconfig_localhost: true`

@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }

 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }

 resource "packet_device" "k8s_etcd" {
@@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }

@@ -323,7 +323,7 @@ def openstack_host(resource, module_name):
         })

     # add groups based on attrs
-    groups.append('os_image=' + attrs['image']['name'])
+    groups.append('os_image=' + attrs['image']['id'])
     groups.append('os_flavor=' + attrs['flavor']['name'])
     groups.extend('os_metadata_%s=%s' % item
                   for item in list(attrs['metadata'].items()))
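
For context, this dynamic-inventory helper turns instance attributes from the Terraform state into Ansible groups. A minimal standalone sketch of the derivation above, using a hypothetical `attrs` dict (not taken from any real state file), shows why keying on the image `id` is more robust than the mutable image `name`:

```python
# Minimal sketch of the group derivation in terraform.py above.
# The attrs dict is a hypothetical example of the attributes Terraform
# records for one OpenStack instance; field names match the hunk above.
attrs = {
    "image": {"id": "1b2c3d4e-aaaa-bbbb-cccc-1234567890ab", "name": "ubuntu-20.04"},
    "flavor": {"name": "m1.large"},
    "metadata": {"ssh_user": "ubuntu", "kubespray_groups": "kube_node,k8s_cluster"},
}

groups = []
# Keyed on the image id, which stays stable even if the image is renamed in Glance.
groups.append('os_image=' + attrs['image']['id'])
groups.append('os_flavor=' + attrs['flavor']['name'])
# One group per metadata key/value pair, e.g. os_metadata_ssh_user=ubuntu.
groups.extend('os_metadata_%s=%s' % item
              for item in list(attrs['metadata'].items()))
print(groups)
```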
contrib/terraform/upcloud/README.md (new file, 106 lines)
@@ -0,0 +1,106 @@
# Kubernetes on UpCloud with Terraform

Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
   Kubernetes cluster
+-----------------------+
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | | Master/etcd  |  |
|   | | node(s)      |  |
|   +-+              |  |
|     +--------------+  |
|           ^           |
|           |           |
|           v           |
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | |    Worker    |  |
|   | |   node(s)    |  |
|   +-+              |  |
|     +--------------+  |
+-----------------------+
```

## Requirements

* Terraform 0.13.0 or newer

## Quickstart

NOTE: Assumes you are at the root of the kubespray repo.

For authentication in your cluster you can use the following environment variables:

```bash
export TF_VAR_UPCLOUD_USERNAME=username
export TF_VAR_UPCLOUD_PASSWORD=password
```

To allow API access to your UpCloud account, you need to allow API connections by visiting the [Account page](https://hub.upcloud.com/account) in your UpCloud Hub.

Copy the cluster configuration file.

```bash
CLUSTER=my-upcloud-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/upcloud/cluster-settings.tfvars inventory/$CLUSTER/
export ANSIBLE_CONFIG=ansible.cfg
cd inventory/$CLUSTER
```

Edit `cluster-settings.tfvars` to match your requirements.
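For instance (the `$EDITOR` shorthand is just an example, mirroring the vSphere guide further down in this diff):

```bash
# Ensure $EDITOR points to your favorite editor
$EDITOR cluster-settings.tfvars
```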
Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/upcloud
terraform apply --var-file cluster-settings.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/upcloud/
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

You can set up Kubernetes with kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Teardown

You can tear down your infrastructure using the following Terraform command:

```bash
terraform destroy --var-file cluster-settings.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/upcloud/
```

## Variables

* `hostname`: A valid domain name, e.g. example.com. The maximum length is 128 characters.
* `template_name`: The name or UUID of a base image
* `username`: A user to access the nodes
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. The key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `cpu`: Number of CPU cores
  * `mem`: Memory size in MB
  * `disk_size`: The size of the storage in GB

contrib/terraform/upcloud/cluster-settings.tfvars (new file, 58 lines)
@@ -0,0 +1,58 @@

# See: https://developers.upcloud.com/1.3/5-zones/
zone     = "fi-hel1"
username = "ubuntu"

inventory_file = "inventory.ini"

# A valid domain name, e.g. host.example.com. The maximum length is 128 characters.
hostname = "example.com"

# Set the operating system using UUID or exact name
template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa public key 1",
  "ssh-rsa public key 2",
]

# Check the list of available plans: https://developers.upcloud.com/1.3/7-plans/
machines = {
  "master-0" : {
    "node_type" : "master",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-0" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-1" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-2" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  }
}

contrib/terraform/upcloud/main.tf (new file, 55 lines)
@@ -0,0 +1,55 @@

terraform {
  required_version = ">= 0.13.0"
}
provider "upcloud" {
  # Your UpCloud credentials are read from environment variables:
  username = var.UPCLOUD_USERNAME
  password = var.UPCLOUD_PASSWORD
}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  zone     = var.zone
  hostname = var.hostname

  template_name = var.template_name
  username      = var.username

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip),
      values(module.kubernetes.master_ip),
      range(1, length(module.kubernetes.master_ip) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s",
      keys(module.kubernetes.worker_ip),
      values(module.kubernetes.worker_ip)))
    list_master = join("\n", formatlist("%s",
      keys(module.kubernetes.master_ip)))
    list_worker = join("\n", formatlist("%s",
      keys(module.kubernetes.worker_ip)))
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}

contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf (new file, 66 lines)
@@ -0,0 +1,66 @@

resource "upcloud_server" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  hostname = "${each.key}.${var.hostname}"
  cpu      = each.value.cpu
  mem      = each.value.mem
  zone     = var.zone

  template {
    storage = var.template_name
    size    = each.value.disk_size
  }

  # Network interfaces
  network_interface {
    type = "public"
  }

  network_interface {
    type = "utility"
  }
  # Include at least one public SSH key
  login {
    user            = var.username
    keys            = var.ssh_public_keys
    create_password = false
  }
}

resource "upcloud_server" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  hostname = "${each.key}.${var.hostname}"
  cpu      = each.value.cpu
  mem      = each.value.mem
  zone     = var.zone

  template {
    storage = var.template_name
    size    = each.value.disk_size
  }

  # Network interfaces
  network_interface {
    type = "public"
  }

  # Include at least one public SSH key
  login {
    user            = var.username
    keys            = var.ssh_public_keys
    create_password = false
  }
}

@@ -0,0 +1,14 @@ (new file)

output "master_ip" {
  value = {
    for instance in upcloud_server.master :
    instance.hostname => instance.network_interface[0].ip_address
  }
}

output "worker_ip" {
  value = {
    for instance in upcloud_server.worker :
    instance.hostname => instance.network_interface[0].ip_address
  }
}

@@ -0,0 +1,25 @@ (new file)
variable "zone" {
  type = string
}

variable "hostname" {
  default = "example.com"
}

variable "template_name" {}

variable "username" {}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    cpu       = string
    mem       = string
    disk_size = number
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

@@ -0,0 +1,10 @@ (new file)

terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
      version = "~>2.0.0"
    }
  }
  required_version = ">= 0.13"
}

contrib/terraform/upcloud/output.tf (new file, 8 lines)
@@ -0,0 +1,8 @@

output "master_ip" {
  value = module.kubernetes.master_ip
}

output "worker_ip" {
  value = module.kubernetes.worker_ip
}

contrib/terraform/upcloud/sample-inventory/cluster.tfvars (new file, 56 lines)
@@ -0,0 +1,56 @@
# See: https://developers.upcloud.com/1.3/5-zones/
zone     = "fi-hel1"
username = "ubuntu"

inventory_file = "inventory.ini"

# A valid domain name, e.g. host.example.com. The maximum length is 128 characters.
hostname = "example.com"

# Set the operating system using UUID or exact name
template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)"
ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

# Check the list of available plans: https://developers.upcloud.com/1.3/7-plans/
machines = {
  "master-0" : {
    "node_type" : "master",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-0" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-1" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-2" : {
    "node_type" : "worker",
    # Number of CPU cores
    "cpu" : "2",
    # Memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  }
}

contrib/terraform/upcloud/sample-inventory/group_vars (symbolic link)
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars/

contrib/terraform/upcloud/templates/inventory.tpl (new file, 17 lines)
@@ -0,0 +1,17 @@

[all]
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
${list_master}

[etcd]
${list_master}

[kube_node]
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node
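
For illustration, a rendered `inventory.ini` from this template might look like the following; the hostnames and addresses here are hypothetical examples, not values the template guarantees:

```text
[all]
master-0.example.com ansible_user=ubuntu ansible_host=10.1.0.10 etcd_member_name=etcd1
worker-0.example.com ansible_user=ubuntu ansible_host=10.1.0.20

[kube_control_plane]
master-0.example.com

[etcd]
master-0.example.com

[kube_node]
worker-0.example.com

[k8s_cluster:children]
kube_control_plane
kube_node
```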
contrib/terraform/upcloud/variables.tf (new file, 35 lines)
@@ -0,0 +1,35 @@

variable "zone" {
  description = "The zone where to run the cluster"
}

variable "hostname" {
  default = "example.com"
}

variable "template_name" {}

variable "username" {}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    cpu       = string
    mem       = string
    disk_size = number
  }))
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

variable "inventory_file" {
  description = "Where to store the generated inventory file"
}

variable "UPCLOUD_USERNAME" {}

variable "UPCLOUD_PASSWORD" {}

contrib/terraform/upcloud/versions.tf (new file, 10 lines)
@@ -0,0 +1,10 @@

terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
      version = "~>2.0.0"
    }
  }
  required_version = ">= 0.13"
}

contrib/terraform/vsphere/README.md (new file, 117 lines)
@@ -0,0 +1,117 @@
# Kubernetes on vSphere with Terraform

Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/se/products/vsphere.html) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
   Kubernetes cluster
+-----------------------+
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | | Master/etcd  |  |
|   | | node(s)      |  |
|   +-+              |  |
|     +--------------+  |
|           ^           |
|           |           |
|           v           |
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | |    Worker    |  |
|   | |   node(s)    |  |
|   +-+              |  |
|     +--------------+  |
+-----------------------+
```

## Warning

This setup assumes that DHCP is disabled in the vSphere cluster, so IP addresses have to be provided in the configuration file.

## Requirements

* Terraform 0.13.0 or newer

*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*

## Quickstart

NOTE: *Assumes you are at the root of the kubespray repo.*

Copy the sample inventory for your cluster and copy the default terraform variables.

```bash
CLUSTER=my-vsphere-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/vsphere/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your setup. You MUST set values specific to your network and vSphere cluster.

```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```

For authentication in your vSphere cluster you can use the following environment variables:

```bash
export TF_VAR_vsphere_user=username
export TF_VAR_vsphere_password=password
```

Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/vsphere
terraform apply \
    -var-file default.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/vsphere
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.
You can type `terraform output` to find out the IP addresses of the nodes.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

Example to use this with the default sample inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Variables

### Required

* `machines`: Machines to provision. The key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `ip`: The IP address with the netmask (CIDR notation)
* `gateway`: The IP address of the network gateway
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `vsphere_datacenter`: The identifier of the vSphere data center
* `vsphere_compute_cluster`: The identifier of the vSphere compute cluster
* `vsphere_datastore`: The identifier of the vSphere data store
* `vsphere_server`: The address of the vSphere server
* `vsphere_hostname`: The IP address of the vSphere hostname
* `template_name`: The name of a base image (the image has to be uploaded to vSphere beforehand)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(defaults to `default`)*
* `dns_primary`: The IP address of the primary DNS server *(defaults to `8.8.4.4`)*
* `dns_secondary`: The IP address of the secondary DNS server *(defaults to `8.8.8.8`)*

An example variables file can be found in `default.tfvars`.

contrib/terraform/vsphere/default.tfvars (new file, 34 lines)
@@ -0,0 +1,34 @@
prefix = "default"

inventory_file = "inventory.ini"

machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

vsphere_datacenter      = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore       = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server          = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname        = "i-did-not-read-the-docs" # e.g. 192.168.0.2

template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg

contrib/terraform/vsphere/main.tf (new file, 116 lines)
@@ -0,0 +1,116 @@
provider "vsphere" {
  # Username and password set through env vars VSPHERE_USER and VSPHERE_PASSWORD
  user     = var.vsphere_user
  password = var.vsphere_password

  vsphere_server = var.vsphere_server

  # If you have a self-signed cert
  allow_unverified_ssl = true
}

data "vsphere_datacenter" "dc" {
  name = var.vsphere_datacenter
}

data "vsphere_datastore" "datastore" {
  name          = var.vsphere_datastore
  datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_network" "network" {
  name          = "VM Network"
  datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_host" "host" {
  name          = var.vsphere_hostname
  datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_virtual_machine" "template" {
  name          = var.template_name
  datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_compute_cluster" "compute_cluster" {
  name          = var.vsphere_compute_cluster
  datacenter_id = data.vsphere_datacenter.dc.id
}

resource "vsphere_resource_pool" "pool" {
  name                    = "${var.prefix}-cluster-pool"
  parent_resource_pool_id = data.vsphere_host.host.resource_pool_id
}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  machines = var.machines

  ## Master ##
  master_cores     = var.master_cores
  master_memory    = var.master_memory
  master_disk_size = var.master_disk_size

  ## Worker ##
  worker_cores     = var.worker_cores
  worker_memory    = var.worker_memory
  worker_disk_size = var.worker_disk_size

  ## Global ##

  gateway       = var.gateway
  dns_primary   = var.dns_primary
  dns_secondary = var.dns_secondary

  pool_id      = vsphere_resource_pool.pool.id
  datastore_id = data.vsphere_datastore.datastore.id

  folder                = ""
  guest_id              = data.vsphere_virtual_machine.template.guest_id
  scsi_type             = data.vsphere_virtual_machine.template.scsi_type
  network_id            = data.vsphere_network.network.id
  adapter_type          = data.vsphere_virtual_machine.template.network_interface_types[0]
  firmware              = var.firmware
  hardware_version      = var.hardware_version
  disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned

  template_id = data.vsphere_virtual_machine.template.id

  ssh_public_keys = var.ssh_public_keys
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip),
      values(module.kubernetes.master_ip),
      range(1, length(module.kubernetes.master_ip) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s",
      keys(module.kubernetes.worker_ip),
      values(module.kubernetes.worker_ip)))
    list_master = join("\n", formatlist("%s",
      keys(module.kubernetes.master_ip)))
    list_worker = join("\n", formatlist("%s",
      keys(module.kubernetes.worker_ip)))
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}

contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf (new file, 109 lines)
@@ -0,0 +1,109 @@
resource "vsphere_virtual_machine" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  name             = each.key
  resource_pool_id = var.pool_id
  datastore_id     = var.datastore_id

  num_cpus           = var.worker_cores
  memory             = var.worker_memory
  memory_reservation = var.worker_memory
  guest_id           = var.guest_id
  enable_disk_uuid   = "true"
  scsi_type          = var.scsi_type
  folder             = var.folder
  firmware           = var.firmware
  hardware_version   = var.hardware_version

  wait_for_guest_net_routable = false

  network_interface {
    network_id   = var.network_id
    adapter_type = var.adapter_type
  }

  disk {
    label            = "disk0"
    size             = var.worker_disk_size
    thin_provisioned = var.disk_thin_provisioned
  }

  lifecycle {
    ignore_changes = [disk]
  }

  clone {
    template_uuid = var.template_id
  }

  cdrom {
    client_device = true
  }

  vapp {
    properties = {
      "user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
        gw              = var.gateway,
        dns             = var.dns_primary,
        ssh_public_keys = var.ssh_public_keys }))
    }
  }
}

resource "vsphere_virtual_machine" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  name             = each.key
  resource_pool_id = var.pool_id
  datastore_id     = var.datastore_id

  num_cpus           = var.master_cores
  memory             = var.master_memory
  memory_reservation = var.master_memory
  guest_id           = var.guest_id
  enable_disk_uuid   = "true"
  scsi_type          = var.scsi_type
  folder             = var.folder
  firmware           = var.firmware
  hardware_version   = var.hardware_version

  network_interface {
    network_id   = var.network_id
    adapter_type = var.adapter_type
  }

  disk {
    label            = "disk0"
    size             = var.master_disk_size
    thin_provisioned = var.disk_thin_provisioned
  }

  lifecycle {
    ignore_changes = [disk]
  }

  clone {
    template_uuid = var.template_id
  }

  cdrom {
    client_device = true
  }

  vapp {
    properties = {
      "user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
        gw              = var.gateway,
        dns             = var.dns_primary,
        ssh_public_keys = var.ssh_public_keys }))
    }
  }
}

@@ -0,0 +1,13 @@ (new file)
output "master_ip" {
  value = {
    for instance in vsphere_virtual_machine.master :
    instance.name => instance.default_ip_address
  }
}

output "worker_ip" {
  value = {
    for instance in vsphere_virtual_machine.worker :
    instance.name => instance.default_ip_address
  }
}

@@ -0,0 +1,35 @@ (new file)
#cloud-config

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

write_files:
  - path: /etc/netplan/20-internal-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - 172.17.0.100/32
  - path: /etc/netplan/10-user-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          ens192:
            dhcp4: false # true to use dhcp
            addresses:
              - ${ip}
            gateway4: ${gw} # Set gw here
            nameservers:
              addresses:
                - ${dns} # Set DNS ip address here

runcmd:
  - netplan apply

@@ -0,0 +1,38 @@ (new file)
## Global ##
variable "prefix" {}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip        = string
  }))
}

variable "gateway" {}
variable "dns_primary" {}
variable "dns_secondary" {}
variable "pool_id" {}
variable "datastore_id" {}
variable "guest_id" {}
variable "scsi_type" {}
variable "network_id" {}
variable "adapter_type" {}
variable "disk_thin_provisioned" {}
variable "template_id" {}
variable "firmware" {}
variable "folder" {}
variable "ssh_public_keys" {
  type = list(string)
}
variable "hardware_version" {}

## Master ##
variable "master_cores" {}
variable "master_memory" {}
variable "master_disk_size" {}

## Worker ##
variable "worker_cores" {}
variable "worker_memory" {}
variable "worker_disk_size" {}

@@ -0,0 +1,9 @@ (new file)
terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
  }
  required_version = ">= 0.13"
}

contrib/terraform/vsphere/output.tf (new file, 31 lines)
@@ -0,0 +1,31 @@
output "master_ip_addresses" {
  value = module.kubernetes.master_ip
}

output "worker_ip_addresses" {
  value = module.kubernetes.worker_ip
}

output "vsphere_datacenter" {
  value = var.vsphere_datacenter
}

output "vsphere_server" {
  value = var.vsphere_server
}

output "vsphere_datastore" {
  value = var.vsphere_datastore
}

output "vsphere_network" {
  value = var.network
}

output "vsphere_folder" {
  value = terraform.workspace
}

output "vsphere_pool" {
  value = "${terraform.workspace}-cluster-pool"
}

contrib/terraform/vsphere/sample-inventory/cluster.tfvars (new file, 34 lines)
@@ -0,0 +1,34 @@
prefix = "default"

inventory_file = "inventory.ini"

machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

vsphere_datacenter      = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore       = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server          = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname        = "i-did-not-read-the-docs" # e.g. 192.168.0.2

template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg

contrib/terraform/vsphere/sample-inventory/group_vars (symbolic link)
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

contrib/terraform/vsphere/templates/inventory.tpl (new file, 17 lines)
@@ -0,0 +1,17 @@

[all]
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
${list_master}

[etcd]
${list_master}

[kube_node]
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node

contrib/terraform/vsphere/variables.tf (new file, 89 lines)
@@ -0,0 +1,89 @@
## Global ##

variable "prefix" {
  default = ""
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip        = string
  }))
}

variable "inventory_file" {
  default = "inventory.ini"
}

variable "network" {
  default = "VM Network"
}

variable "gateway" {}

variable "dns_primary" {
  default = "8.8.4.4"
}

variable "dns_secondary" {
  default = "8.8.8.8"
}

variable "vsphere_datacenter" {}

variable "vsphere_compute_cluster" {}

variable "vsphere_datastore" {}

variable "vsphere_user" {}

variable "vsphere_password" {}

variable "vsphere_server" {}

variable "vsphere_hostname" {}

variable "firmware" {
  default = "bios"
}

variable "hardware_version" {
  default = "15"
}

variable "template_name" {
  default = "ubuntu-focal-20.04-cloudimg"
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

## Master ##

variable "master_cores" {
  default = 4
}

variable "master_memory" {
  default = 4096
}

variable "master_disk_size" {
  default = "20"
}

## Worker ##

variable "worker_cores" {
  default = 16
}

variable "worker_memory" {
  default = 8192
}
variable "worker_disk_size" {
  default = "100"
}

contrib/terraform/vsphere/versions.tf (new file, 15 lines)
@@ -0,0 +1,15 @@
terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}

@@ -1,32 +0,0 @@ (file removed)
---
vault_deployment_type: docker
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
vault_version: 0.10.1
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
vault_downloads:
  vault:
    enabled: "{{ cert_management == 'vault' }}"
    container: "{{ vault_deployment_type != 'host' }}"
    file: "{{ vault_deployment_type == 'host' }}"
    dest: "{{ local_release_dir }}/vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
    mode: "0755"
    owner: "vault"
    repo: "{{ vault_image_repo }}"
    sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
    tag: "{{ vault_image_tag }}"
    unarchive: true
    url: "{{ vault_download_url }}"
    version: "{{ vault_version }}"
    groups:
      - vault

# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
kube_vault_mount_path: "/kube"
etcd_vault_mount_path: "/etcd"

@@ -1 +0,0 @@ (file removed)
ansible-modules-hashivault>=3.9.4

@@ -1,73 +0,0 @@ (file removed)
---
- include_tasks: sync_etcd_master_certs.yml
  when: inventory_hostname in groups.etcd
  tags:
    - etcd-secrets

- include_tasks: sync_etcd_node_certs.yml
  when: inventory_hostname in etcd_node_cert_hosts
  tags:
    - etcd-secrets

# Issue master certs to Etcd nodes
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
    issue_cert_alt_names: "{{ groups['etcd'] + ['localhost'] + (etcd_cert_alt_names)|default() }}"
    issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
    issue_cert_file_group: "{{ etcd_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups.etcd }}"
    issue_cert_ip_sans: >-
      [
      {%- for host in groups.etcd -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      {%- for cert_alt_ip in etcd_cert_alt_ips -%}
      "{{ cert_alt_ip }}",
      {%- endfor -%}
      "127.0.0.1","::1"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: etcd
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_master_certs_needed|d([]) }}"
  when: inventory_hostname in groups.etcd
  notify: set etcd_secret_changed

# Issue node certs to everyone else
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
    issue_cert_alt_names: "{{ etcd_node_cert_hosts }}"
    issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}"
    issue_cert_file_group: "{{ etcd_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ etcd_node_cert_hosts }}"
    issue_cert_ip_sans: >-
      [
      {%- for host in etcd_node_cert_hosts -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      "127.0.0.1","::1"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: etcd
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_node_certs_needed|d([]) }}"
  when: inventory_hostname in etcd_node_cert_hosts
  notify: set etcd_secret_changed

- name: gen_certs_vault | ensure file permissions
  shell: >-
    find {{ etcd_cert_dir }} -type d -exec chmod 0755 {} \; &&
    find {{ etcd_cert_dir }} -type f -exec chmod 0640 {} \;
  changed_when: false

@@ -1,39 +0,0 @@ (file removed)
---

- name: sync_etcd_master_certs | Create list of master certs needing creation
  set_fact:
    etcd_master_cert_list: >-
      {{ etcd_master_cert_list|default([]) + [
      "admin-" + inventory_hostname + ".pem",
      "member-" + inventory_hostname + ".pem"
      ] }}

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_owner: kube
    sync_file_group: root
    sync_file_is_cert: true
  with_items: "{{ etcd_master_cert_list|d([]) }}"

- name: sync_etcd_certs | Set facts for etcd sync_file results
  set_fact:
    etcd_master_certs_needed: "{{ etcd_master_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
  set_fact:
    sync_file_results: []

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]

- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
  set_fact:
    sync_file_results: []

@@ -1,33 +0,0 @@ (file removed)
---

- name: sync_etcd_node_certs | Create list of node certs needing creation
  set_fact:
    etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_is_cert: true
  with_items: "{{ etcd_node_cert_list|d([]) }}"

- name: sync_etcd_node_certs | Set facts for etcd sync_file results
  set_fact:
    etcd_node_certs_needed: "{{ etcd_node_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
  set_fact:
    sync_file_results: []

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: "{{ groups['etcd'] }}"

- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
  set_fact:
    sync_file_results: []

@@ -1,134 +0,0 @@ (file removed)
---
- import_tasks: sync_kube_master_certs.yml
  when: inventory_hostname in groups['kube-master']

- import_tasks: sync_kube_node_certs.yml
  when: inventory_hostname in groups['k8s-cluster']

# Issue admin certs to kube-master hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "admin"
    issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
    issue_cert_file_group: "{{ kube_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups['kube-master'] }}"
    issue_cert_path: "{{ item }}"
    issue_cert_role: kube-master
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_admin_certs_needed|d([]) }}"
  when: inventory_hostname in groups['kube-master']

- name: gen_certs_vault | Set fact about certificate alt names
  set_fact:
    kube_cert_alt_names: >-
      {{
      groups['kube-master'] +
      ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
      ['localhost']
      }}
  run_once: true

- name: gen_certs_vault | Add external load balancer domain name to certificate alt names
  set_fact:
    kube_cert_alt_names: "{{ kube_cert_alt_names + [apiserver_loadbalancer_domain_name] }}"
  when: loadbalancer_apiserver is defined
  run_once: true

# Issue master components certs to kube-master hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "kubernetes"
    issue_cert_alt_names: "{{ kube_cert_alt_names }}"
    issue_cert_file_group: "{{ kube_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups['kube-master'] }}"
    issue_cert_run_once: true
    issue_cert_ip_sans: >-
      [
      {%- for host in groups['kube-master'] -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      {%- if supplementary_addresses_in_ssl_keys is defined -%}
      {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
      "{{ ip_item }}",
      {%- endfor -%}
      {%- endif -%}
      "127.0.0.1","::1","{{ kube_apiserver_ip }}"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: kube-master
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_master_components_certs_needed|d([]) }}"
  when: inventory_hostname in groups['kube-master']
  notify: set secret_changed

# Issue node certs to k8s-cluster nodes
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
    # Need to strip out the 'node-' prefix from the cert name so it can be used
    # with the node authorization plugin ( CN matches kubelet node name )
    issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}"
    issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}"
    issue_cert_file_group: "{{ kube_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
    issue_cert_path: "{{ item }}"
    issue_cert_role: kube-node
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_node_certs_needed|d([]) }}"
  when: inventory_hostname in groups['k8s-cluster']

# Issue proxy certs to k8s-cluster nodes
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "system:kube-proxy"
    issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}"
    issue_cert_file_group: "{{ kube_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
    issue_cert_path: "{{ item }}"
    issue_cert_role: kube-proxy
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_proxy_certs_needed|d([]) }}"
  when: inventory_hostname in groups['k8s-cluster']

# Issue front proxy cert to kube-master hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "front-proxy-client"
    issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
    issue_cert_ca_filename: front-proxy-ca.pem
    issue_cert_alt_names: "{{ kube_cert_alt_names }}"
    issue_cert_file_group: "{{ kube_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups['kube-master'] }}"
    issue_cert_ip_sans: >-
      [
      {%- for host in groups['kube-master'] -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      {%- if supplementary_addresses_in_ssl_keys is defined -%}
      {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
      "{{ ip_item }}",
      {%- endfor -%}
      {%- endif -%}
      "127.0.0.1","::1","{{ kube_apiserver_ip }}"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: front-proxy-client
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
  when: inventory_hostname in groups['kube-master']
  notify: set secret_changed

@@ -1,89 +0,0 @@
|
||||
---
|
||||
|
||||
- name: sync_kube_master_certs | Create list of needed kube admin certs
|
||||
set_fact:
|
||||
kube_admin_cert_list: "{{ kube_admin_cert_list|d([]) + ['admin-' + inventory_hostname + '.pem'] }}"
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: [ "{{ inventory_hostname }}" ]
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: "{{ kube_admin_cert_list|d([]) }}"
|
||||
|
||||
- name: sync_kube_master_certs | Set facts for kube admin sync_file results
|
||||
set_fact:
|
||||
kube_admin_certs_needed: "{{ kube_admin_certs_needed|default([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after kube admin certs
|
||||
set_fact:
|
||||
sync_file_results: []
|
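
# How the sync/issue handshake above works: sync_file.yml first tries to
# copy an existing cert between hosts and registers sync_file_results; an
# entry whose no_srcs flag is true had no copy on any host, so its path
# is appended to a *_certs_needed list and later handed to issue_cert.yml
# to be generated by Vault. A minimal sketch of one result entry, with a
# hypothetical hostname (the exact shape is defined in sync_file.yml):
#   sync_file_results:
#     - path: "/etc/kubernetes/ssl/admin-master1.pem"
#       no_srcs: true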

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: "{{ groups['kube-master'] }}"
    sync_file_is_cert: true
    sync_file_owner: kube
  with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]

- name: sync_kube_master_certs | Set facts for kube master components sync_file results
  set_fact:
    kube_master_components_certs_needed: "{{ kube_master_components_certs_needed|d([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_kube_master_certs | Unset sync_file_results after kube master components cert
  set_fact:
    sync_file_results: []

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: front-proxy-ca.pem
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: "{{ groups['kube-master'] }}"
    sync_file_owner: kube

- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
  set_fact:
    sync_file_results: []

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: "{{ groups['kube-master'] }}"
    sync_file_is_cert: true
    sync_file_owner: kube
  with_items: ["front-proxy-client.pem"]

- name: sync_kube_master_certs | Set facts for front-proxy-client certs sync_file results
  set_fact:
    kube_front_proxy_clients_certs_needed: "{{ kube_front_proxy_clients_certs_needed|d([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-client sync
  set_fact:
    sync_file_results: []

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: "{{ groups['kube-master'] }}"
    sync_file_owner: kube

- name: sync_kube_master_certs | Unset sync_file_results after ca.pem
  set_fact:
    sync_file_results: []

@@ -1,60 +0,0 @@
---

- name: sync_kube_node_certs | Create list of needed certs
  set_fact:
    kube_node_cert_list: "{{ kube_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_is_cert: true
    sync_file_owner: kube
  with_items: "{{ kube_node_cert_list|default([]) }}"

- name: sync_kube_node_certs | Set facts for kube node sync_file results
  set_fact:
    kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_kube_node_certs | Unset sync_file_results after kube node certs
  set_fact:
    sync_file_results: []

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: "{{ groups['k8s-cluster'] }}"
    sync_file_owner: kube

- name: sync_kube_node_certs | Unset sync_file_results after ca.pem
  set_fact:
    sync_file_results: []

- name: sync_kube_node_certs | Create list of needed kube-proxy certs
  set_fact:
    kube_proxy_cert_list: "{{ kube_proxy_cert_list|default([]) + ['kube-proxy-' + inventory_hostname + '.pem'] }}"

- include_tasks: ../../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ kube_cert_dir }}"
    sync_file_group: "{{ kube_cert_group }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_owner: kube
  with_items: "{{ kube_proxy_cert_list|default([]) }}"

- name: sync_kube_node_certs | Set facts for kube-proxy sync_file results
  set_fact:
    kube_proxy_certs_needed: "{{ kube_proxy_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_kube_node_certs | Unset sync_file_results after kube proxy certs
  set_fact:
    sync_file_results: []

@@ -1,200 +0,0 @@
---
vault_bootstrap: false
vault_deployment_type: docker

vault_adduser_vars:
  comment: "Hashicorp Vault User"
  createhome: no
  name: vault
  shell: /sbin/nologin
  system: yes

# These variables are redefined in kubespray-defaults so that the shared
# tasks in the etcd and kubernetes/secrets roles can use them
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
vault_lib_dir: "/var/lib/vault"
vault_log_dir: "/var/log/vault"

vault_version: 0.10.1
vault_binary_checksum: 66f0f1b0b221d664dd5913f8697409d7401df4bb2a19c7277e8fbad152063fae
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"

# hvac==0.7.0 is broken at the moment
hvac_version: 0.6.4

# Arch of Docker images and needed packages
image_arch: "{{ host_architecture }}"

vault_download_vars:
  container: "{{ vault_deployment_type != 'host' }}"
  dest: "vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
  enabled: true
  mode: "0755"
  owner: "vault"
  repo: "{{ vault_image_repo }}"
  sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
  source_url: "{{ vault_download_url }}"
  tag: "{{ vault_image_tag }}"
  unarchive: true
  url: "{{ vault_download_url }}"
  version: "{{ vault_version }}"

vault_container_name: kube-hashicorp-vault
vault_temp_container_name: vault-temp
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"

vault_bind_address: 0.0.0.0
vault_port: 8200
vault_etcd_url: "{{ etcd_access_addresses }}"

# Default and maximum lease TTLs (roughly 8 and 10 years)
vault_default_lease_ttl: 70080h
vault_max_lease_ttl: 87600h

vault_temp_config:
  backend:
    file:
      path: /vault/file
  default_lease_ttl: "{{ vault_default_lease_ttl }}"
  listener:
    tcp:
      address: "{{ vault_bind_address }}:{{ vault_port }}"
      tls_disable: "true"
  max_lease_ttl: "{{ vault_max_lease_ttl }}"

vault_config:
  backend:
    etcd:
      address: "{{ vault_etcd_url }}"
      ha_enabled: "true"
      redirect_addr: "https://{{ inventory_hostname }}:{{ vault_port }}"
      tls_ca_file: "{{ etcd_cert_dir }}/ca.pem"
      tls_cert_file: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
      tls_key_file: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  cluster_name: "kubernetes-vault"
  default_lease_ttl: "{{ vault_default_lease_ttl }}"
  max_lease_ttl: "{{ vault_max_lease_ttl }}"
  ui: "true"
  listener:
    tcp:
      address: "{{ vault_bind_address }}:{{ vault_port }}"
      tls_cert_file: "{{ vault_cert_dir }}/api.pem"
      tls_key_file: "{{ vault_cert_dir }}/api-key.pem"
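
# For orientation, vault_config is what ends up serialized into Vault's
# JSON config file; with hypothetical values filled in it comes out
# roughly as:
#   {
#     "backend": {"etcd": {"address": "https://10.0.0.11:2379",
#                          "ha_enabled": "true", ...}},
#     "default_lease_ttl": "70080h", "max_lease_ttl": "87600h",
#     "ui": "true",
#     "listener": {"tcp": {"address": "0.0.0.0:8200",
#                          "tls_cert_file": "/etc/vault/ssl/api.pem",
#                          "tls_key_file": "/etc/vault/ssl/api-key.pem"}}
#   }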

vault_secret_shares: 1
vault_secret_threshold: 1

vault_successful_http_codes: ["200", "429", "500", "501", "503"]
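
# Vault's /v1/sys/health endpoint encodes state in the HTTP status code
# (e.g. 200 = unsealed and active, 429 = unsealed standby, 501 = not yet
# initialized, 503 = sealed), so every code above means "Vault answered"
# and should not fail the health-check tasks.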

vault_ca_options:
  vault:
    common_name: vault
    format: pem
    ttl: "{{ vault_max_lease_ttl }}"
    exclude_cn_from_sans: true
    alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault"
  etcd:
    common_name: etcd
    format: pem
    ttl: "{{ vault_max_lease_ttl }}"
    exclude_cn_from_sans: true
  kube:
    common_name: kube
    format: pem
    ttl: "{{ vault_max_lease_ttl }}"
    exclude_cn_from_sans: true

vault_client_headers:
  Accept: "application/json"
  Content-Type: "application/json"

etcd_cert_dir: /etc/ssl/etcd/ssl
kube_cert_dir: /etc/kubernetes/ssl

vault_pki_mounts:
  userpass:
    name: userpass
    default_lease_ttl: "{{ vault_default_lease_ttl }}"
    max_lease_ttl: "{{ vault_max_lease_ttl }}"
    description: "Userpass"
    cert_dir: "{{ vault_cert_dir }}"
    roles:
      - name: userpass
        group: userpass
        password: "{{ lookup('password', credentials_dir + '/vault/userpass.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
  vault:
    name: vault
    default_lease_ttl: "{{ vault_default_lease_ttl }}"
    max_lease_ttl: "{{ vault_max_lease_ttl }}"
    description: "Vault Root CA"
    cert_dir: "{{ vault_cert_dir }}"
    roles:
      - name: vault
        group: vault
        password: "{{ lookup('password', credentials_dir + '/vault/vault.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
  etcd:
    name: etcd
    default_lease_ttl: "{{ vault_default_lease_ttl }}"
    max_lease_ttl: "{{ vault_max_lease_ttl }}"
    description: "Etcd Root CA"
    cert_dir: "{{ etcd_cert_dir }}"
    roles:
      - name: etcd
        group: etcd
        password: "{{ lookup('password', credentials_dir + '/vault/etcd.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
          enforce_hostnames: false
          organization: "kube:etcd"
  kube:
    name: kube
    default_lease_ttl: "{{ vault_default_lease_ttl }}"
    max_lease_ttl: "{{ vault_max_lease_ttl }}"
    description: "Kubernetes Root CA"
    cert_dir: "{{ kube_cert_dir }}"
    roles:
      - name: kube-master
        group: kube-master
        password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
          enforce_hostnames: false
          organization: "system:masters"
      - name: front-proxy-client
        group: kube-master
        password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
          enforce_hostnames: false
          organization: "system:front-proxy-client"
      - name: kube-node
        group: k8s-cluster
        password: "{{ lookup('password', credentials_dir + '/vault/kube-node.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
          enforce_hostnames: false
          organization: "system:nodes"
      - name: kube-proxy
        group: k8s-cluster
        password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
        policy_rules: default
        role_options:
          allow_any_name: true
          enforce_hostnames: false
          organization: "system:node-proxier"
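
# Note on the password lookups above: Ansible's password lookup generates
# a random 15-character secret the first time it runs and persists it at
# the given path under credentials_dir, so re-running the playbook reuses
# the same credentials instead of minting new ones.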

@@ -1,50 +0,0 @@
---
- name: restart vault
  command: /bin/true
  notify:
    - restart vault service
    - wait for vault up
    - unseal vault

- name: wait for vault up
  uri:
    url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
    headers: "{{ vault_client_headers }}"
    status_code: "{{ vault_successful_http_codes | join(',') }}"
  register: vault_health_check
  until: vault_health_check is succeeded
  retries: 10
  delay: "{{ retry_stagger | random + 3 }}"
  run_once: yes
  notify: set facts about local Vault health

- name: wait for vault up nowait
  uri:
    url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
    headers: "{{ vault_client_headers }}"
    status_code: "{{ vault_successful_http_codes | join(',') }}"
  register: vault_health_check
  run_once: yes
  failed_when: false
  notify: set facts about local Vault health

- name: set facts about local Vault health
  set_fact:
    vault_is_running: "{{ vault_health_check.get('status', '-1') in vault_successful_http_codes }}"
    vault_cluster_is_initialized: "{{ vault_health_check.get('json', {}).get('initialized', false) }}"
    vault_is_sealed: "{{ vault_health_check.get('json', {}).get('sealed', true) }}"

- name: restart vault service
  systemd:
    daemon_reload: true
    enabled: yes
    name: vault
    state: restarted

- name: unseal vault
  hashivault_unseal:
    url: "{{ vault_leader_url | default('https://localhost:8200') }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    keys: "{{ item }}"
  with_items: "{{ vault_unseal_keys|default([]) }}"
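
# The "restart vault" handler is a deliberate no-op (command: /bin/true)
# used only for chaining: notifying it queues the real handlers in order,
# that is restart the service, wait for the API to answer, then unseal.
# A task that changes Vault's config therefore only needs:
#   notify: restart vault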

@@ -1,4 +0,0 @@
---
dependencies:
  - role: adduser
    user: "{{ vault_adduser_vars }}"
Some files were not shown because too many files have changed in this diff.