Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: floryut-pa ... v2.19.0 (307 commits)
.gitignore (vendored, 1 change)

@@ -102,7 +102,6 @@ ENV/
 # molecule
 roles/**/molecule/**/__pycache__/
-roles/**/molecule/**/*.conf
 
 # macOS
 .DS_Store
.gitlab-ci.yml

@@ -8,7 +8,7 @@ stages:
   - deploy-special
 
 variables:
-  KUBESPRAY_VERSION: v2.17.1
+  KUBESPRAY_VERSION: v2.18.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
@@ -27,6 +27,7 @@ variables:
   ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
+  REMOVE_NODE_CHECK: "false"
   UPGRADE_TEST: "false"
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
@@ -80,3 +81,4 @@ include:
   - .gitlab-ci/terraform.yml
   - .gitlab-ci/packet.yml
   - .gitlab-ci/vagrant.yml
+  - .gitlab-ci/molecule.yml
.gitlab-ci/lint.yml

@@ -23,9 +23,8 @@ ansible-lint:
   extends: .job
   stage: unit-tests
   tags: [light]
-  # lint every yml/yaml file that looks like it contains Ansible plays
-  script: |-
-    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
+  script:
+    - ansible-lint -v
   except: ['triggers', 'master']
 
 syntax-check:
@@ -53,7 +52,7 @@ tox-inventory-builder:
     - ./tests/scripts/rebase.sh
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible
+    - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
   script:
    - pip3 install tox
.gitlab-ci/molecule.yml (new file, 93 lines)

@@ -0,0 +1,93 @@
+---
+
+.molecule:
+  tags: [c3.small.x86]
+  only: [/^pr-.*$/]
+  except: ['triggers']
+  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  services: []
+  stage: deploy-part1
+  before_script:
+    - tests/scripts/rebase.sh
+    - apt-get update && apt-get install -y python3-pip
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip uninstall -y ansible ansible-base ansible-core
+    - python -m pip install -r tests/requirements.txt
+    - ./tests/scripts/vagrant_clean.sh
+  script:
+    - ./tests/scripts/molecule_run.sh
+  after_script:
+    - chronic ./tests/scripts/molecule_logs.sh
+  artifacts:
+    when: always
+    paths:
+      - molecule_logs/
+
+# CI template for periodic CI jobs
+# Enabled when PERIODIC_CI_ENABLED var is set
+.molecule_periodic:
+  only:
+    variables:
+      - $PERIODIC_CI_ENABLED
+  allow_failure: true
+  extends: .molecule
+
+molecule_full:
+  extends: .molecule_periodic
+
+molecule_no_container_engines:
+  extends: .molecule
+  script:
+    - ./tests/scripts/molecule_run.sh -e container-engine
+  when: on_success
+
+molecule_docker:
+  extends: .molecule
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/docker
+  when: on_success
+
+molecule_containerd:
+  extends: .molecule
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
+  when: on_success
+
+molecule_cri-o:
+  extends: .molecule
+  stage: deploy-part2
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
+  when: on_success
+
+molecule_cri-dockerd:
+  extends: .molecule
+  stage: deploy-part2
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
+  when: on_success
+
+# Stage 3 container engines don't get as much attention so allow them to fail
+molecule_kata:
+  extends: .molecule
+  stage: deploy-part3
+  allow_failure: true
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
+  when: on_success
+
+molecule_gvisor:
+  extends: .molecule
+  stage: deploy-part3
+  allow_failure: true
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
+  when: on_success
+
+molecule_youki:
+  extends: .molecule
+  stage: deploy-part3
+  allow_failure: true
+  script:
+    - ./tests/scripts/molecule_run.sh -i container-engine/youki
+  when: on_success
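The `-i`/`-e` flags used by these jobs select or exclude molecule scenarios by role path. A minimal local sketch under the same assumptions the CI jobs make (a kubespray checkout on a vagrant-capable host):

```ShellSession
# Run only the containerd scenario, as molecule_containerd does
./tests/scripts/molecule_run.sh -i container-engine/containerd

# Run every scenario except the container-engine ones
./tests/scripts/molecule_run.sh -e container-engine
```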
.gitlab-ci/packet.yml

@@ -31,18 +31,26 @@ packet_ubuntu20-calico-aio:
   variables:
     RESET_CHECK: "true"
 
-# Exericse ansible variants
+# Exericse ansible variants during the nightly jobs
 packet_ubuntu20-calico-aio-ansible-2_9:
   stage: deploy-part1
-  extends: .packet_pr
+  extends: .packet_periodic
   when: on_success
   variables:
     ANSIBLE_MAJOR_VERSION: "2.9"
     RESET_CHECK: "true"
 
+packet_ubuntu20-calico-aio-ansible-2_10:
+  stage: deploy-part1
+  extends: .packet_periodic
+  when: on_success
+  variables:
+    ANSIBLE_MAJOR_VERSION: "2.10"
+    RESET_CHECK: "true"
+
 packet_ubuntu20-calico-aio-ansible-2_11:
   stage: deploy-part1
-  extends: .packet_pr
+  extends: .packet_periodic
   when: on_success
   variables:
     ANSIBLE_MAJOR_VERSION: "2.11"
@@ -70,7 +78,7 @@ packet_centos7-flannel-addons-ha:
   stage: deploy-part2
   when: on_success
 
-packet_centos8-crio:
+packet_almalinux8-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: on_success
@@ -100,16 +108,6 @@ packet_ubuntu16-flannel-ha:
   extends: .packet_pr
   when: manual
 
-packet_ubuntu16-kube-router-sep:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-packet_ubuntu16-kube-router-svc-proxy:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
 packet_debian10-cilium-svc-proxy:
   stage: deploy-part2
   extends: .packet_periodic
@@ -145,17 +143,17 @@ packet_centos7-calico-ha-once-localhost:
   services:
     - docker:19.03.9-dind
 
-packet_centos8-kube-ovn:
+packet_almalinux8-kube-ovn:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
 
-packet_centos8-calico:
+packet_almalinux8-calico:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
 
-packet_centos8-docker:
+packet_almalinux8-docker:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
@@ -165,11 +163,6 @@ packet_fedora34-docker-weave:
   extends: .packet_pr
   when: on_success
 
-packet_fedora35-kube-router:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: on_success
-
 packet_opensuse-canal:
   stage: deploy-part2
   extends: .packet_periodic
@@ -203,7 +196,7 @@ packet_ubuntu18-flannel-ha-once:
   when: manual
 
 # Calico HA eBPF
-packet_centos8-calico-ha-ebpf:
+packet_almalinux8-calico-ha-ebpf:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -218,11 +211,6 @@ packet_centos7-calico-ha:
   extends: .packet_pr
   when: manual
 
-packet_centos7-kube-router:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
 packet_centos7-multus-calico:
   stage: deploy-part2
   extends: .packet_pr
@@ -255,7 +243,7 @@ packet_amazon-linux-2-aio:
   extends: .packet_pr
   when: manual
 
-packet_centos8-calico-nodelocaldns-secondary:
+packet_almalinux8-calico-nodelocaldns-secondary:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -275,6 +263,13 @@ packet_centos7-docker-weave-upgrade-ha:
   variables:
     UPGRADE_TEST: basic
 
+packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
+  stage: deploy-part3
+  extends: .packet_periodic
+  when: on_success
+  variables:
+    UPGRADE_TEST: basic
+
 # Calico HA Wireguard
 packet_ubuntu20-calico-ha-wireguard:
   stage: deploy-part2
@@ -288,6 +283,14 @@ packet_debian10-calico-upgrade:
   variables:
     UPGRADE_TEST: graceful
 
+packet_almalinux8-calico-remove-node:
+  stage: deploy-part3
+  extends: .packet_pr
+  when: on_success
+  variables:
+    REMOVE_NODE_CHECK: "true"
+    REMOVE_NODE_NAME: "instance-3"
+
 packet_debian10-calico-upgrade-once:
   stage: deploy-part3
   extends: .packet_periodic
.gitlab-ci/terraform.yml

@@ -60,11 +60,11 @@ tf-validate-openstack:
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
 
-tf-validate-packet:
+tf-validate-metal:
   extends: .terraform_validate
   variables:
     TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: packet
+    PROVIDER: metal
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-validate-aws:
.gitlab-ci/vagrant.yml

@@ -1,28 +1,5 @@
 ---
 
-molecule_tests:
-  tags: [c3.small.x86]
-  only: [/^pr-.*$/]
-  except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
-  services: []
-  stage: deploy-part1
-  before_script:
-    - tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible
-    - python -m pip install -r tests/requirements.txt
-    - ./tests/scripts/vagrant_clean.sh
-  script:
-    - ./tests/scripts/molecule_run.sh
-  after_script:
-    - chronic ./tests/scripts/molecule_logs.sh
-  artifacts:
-    when: always
-    paths:
-      - molecule_logs/
-
 .vagrant:
   extends: .testcases
   variables:
@@ -38,7 +15,7 @@ molecule_tests:
   before_script:
     - apt-get update && apt-get install -y python3-pip
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible
+    - python -m pip uninstall -y ansible ansible-base ansible-core
     - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
@@ -66,3 +43,24 @@ vagrant_ubuntu20-flannel:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
+
+vagrant_ubuntu16-kube-router-sep:
+  stage: deploy-part2
+  extends: .vagrant
+  when: manual
+
+# Service proxy test fails connectivity testing
+vagrant_ubuntu16-kube-router-svc-proxy:
+  stage: deploy-part2
+  extends: .vagrant
+  when: manual
+
+vagrant_fedora35-kube-router:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
+vagrant_centos7-kube-router:
+  stage: deploy-part2
+  extends: .vagrant
+  when: manual
Dockerfile (12 changes)

@@ -1,5 +1,9 @@
-# Use imutable image tags rather than mutable tags (like ubuntu:18.04)
-FROM ubuntu:bionic-20200807
+# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
+FROM ubuntu:focal-20220316
 
+ARG ARCH=amd64
+ARG TZ=Etc/UTC
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
 RUN apt update -y \
     && apt install -y \
@@ -8,7 +12,7 @@ RUN apt update -y \
     && rm -rf /var/lib/apt/lists/*
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
     && add-apt-repository \
-    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable" \
     && apt update -y && apt-get install --no-install-recommends -y docker-ce \
@@ -28,6 +32,6 @@ RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
     && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
 
 RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
     && mv kubectl /usr/local/bin/kubectl
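With `ARCH` now threaded through both the Docker apt repository line and the kubectl download, the image can be built for non-amd64 hosts. A hedged sketch (the image tag is an assumption):

```ShellSession
docker build --build-arg ARCH=arm64 -t kubespray:local-arm64 .
```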
OWNERS_ALIASES

@@ -4,10 +4,10 @@ aliases:
     - chadswen
     - mirwan
     - miouge1
-    - woopstar
     - luckysb
     - floryut
     - oomichi
+    - cristicalin
   kubespray-reviewers:
     - holmsten
     - bozzo
@@ -15,7 +15,9 @@ aliases:
     - oomichi
     - jayonlau
     - cristicalin
+    - liupeng0518
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms
     - ant31
+    - woopstar
README.md (44 changes)
@@ -19,10 +19,10 @@ To deploy the cluster you can use :
 
 #### Usage
 
-```ShellSession
-# Install dependencies from ``requirements.txt``
-sudo pip3 install -r requirements.txt
+Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
+then run the following steps:
 
+```ShellSession
 # Copy ``inventory/sample`` as ``inventory/mycluster``
 cp -rfp inventory/sample inventory/mycluster
 
@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
 You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
 
 ```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.17.1
+docker pull quay.io/kubespray/kubespray:v2.18.1
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.17.1 bash
+  quay.io/kubespray/kubespray:v2.18.1 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -75,10 +75,11 @@ python -V && pip -V
 ```
 
 If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
-Install the necessary requirements
+
+Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
+then run the following step:
 
 ```ShellSession
-sudo pip install -r requirements.txt
 vagrant up
 ```
 
@@ -110,6 +111,7 @@ vagrant up
 - [Adding/replacing a node](docs/nodes.md)
 - [Upgrades basics](docs/upgrades.md)
 - [Air-Gap installation](docs/offline-environment.md)
+- [Hardening](docs/hardening.md)
 - [Roadmap](docs/roadmap.md)
 
 ## Supported Linux Distributions
@@ -131,27 +133,27 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.22.5
-  - [etcd](https://github.com/coreos/etcd) v3.5.0
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
+  - [etcd](https://github.com/etcd-io/etcd) v3.5.3
   - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.5.8
+  - [containerd](https://containerd.io/) v1.6.4
   - [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
-  - [calico](https://github.com/projectcalico/calico) v3.20.3
+  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
+  - [calico](https://github.com/projectcalico/calico) v3.22.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.9.11
-  - [flanneld](https://github.com/flannel-io/flannel) v0.14.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.8.1
-  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.3.2
+  - [cilium](https://github.com/cilium/cilium) v1.11.3
+  - [flanneld](https://github.com/flannel-io/flannel) v0.17.0
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
   - [multus](https://github.com/intel/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
 - Application
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
   - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.5.4
-  - [coredns](https://github.com/coredns/coredns) v1.8.0
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.0.4
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
+  - [coredns](https://github.com/coredns/coredns) v1.8.6
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1
 
 ## Container Runtime Notes
 
@@ -160,8 +162,8 @@ Note: Upstart/SysV init based OS types are not supported.
 
 ## Requirements
 
-- **Minimum required version of Kubernetes is v1.20**
-- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
+- **Minimum required version of Kubernetes is v1.21**
+- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
RELEASE.md (33 changes)
@@ -2,17 +2,18 @@
 
 The Kubespray Project is released on an as-needed basis. The process is as follows:
 
-1. An issue is proposing a new release with a changelog since the last release
+1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
 2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
 3. The `kube_version_min_required` variable is set to `n-1`
-4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
-5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
-6. An approver creates a release branch in the form `release-X.Y`
-7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
-8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
-9. The release issue is closed
-10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
-11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
+4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
+5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
+6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
+7. An approver creates a release branch in the form `release-X.Y`
+8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
+9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
+10. The release issue is closed
+11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
+12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
 
 ## Major/minor releases and milestones
 
@@ -46,3 +47,17 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
 then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
 and *any* changes to other components, like etcd v4, or calico 1.2.3.
 And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
+
+## Release note creation
+
+You can create a release note with:
+
+```shell
+export GITHUB_TOKEN=<your-github-token>
+export ORG=kubernetes-sigs
+export REPO=kubespray
+release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
+```
+
+If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
+It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
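A hedged sketch of filling in the `<commit-id>` placeholders from the release tags (the tag names here are examples, not part of the documented process):

```ShellSession
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
# Resolve the range boundaries from existing tags
release-notes --start-sha "$(git rev-list -n 1 v2.18.0)" \
  --end-sha "$(git rev-list -n 1 v2.19.0)" \
  --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```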
Vagrantfile (vendored, 19 changes)
@@ -26,9 +26,11 @@ SUPPORTED_OS = {
   "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
   "centos8" => {box: "centos/8", user: "vagrant"},
   "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
+  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
+  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
   "fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
   "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
-  "opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
+  "opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
   "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -53,7 +55,7 @@ $subnet_ipv6 ||= "fd3c:b398:0698:0756"
 $os ||= "ubuntu1804"
 $network_plugin ||= "flannel"
 # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
-$multi_networking ||= false
+$multi_networking ||= "False"
 $download_run_once ||= "True"
 $download_force_cache ||= "False"
 # The first three nodes are etcd servers
@@ -68,9 +70,12 @@ $kube_node_instances_with_disks_size ||= "20G"
 $kube_node_instances_with_disks_number ||= 2
 $override_disk_size ||= false
 $disk_size ||= "20GB"
-$local_path_provisioner_enabled ||= false
+$local_path_provisioner_enabled ||= "False"
 $local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
 $libvirt_nested ||= false
 # boolean or string (e.g. "-vvv")
 $ansible_verbosity ||= false
+$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
 
 $playbook ||= "cluster.yml"
@@ -167,7 +172,7 @@ Vagrant.configure("2") do |config|
         # always make /dev/sd{a/b/c} so that CI can ensure that
         # virtualbox and libvirt will have the same devices to use for OSDs
         (1..$kube_node_instances_with_disks_number).each do |d|
-          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
+          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
         end
       end
     end
@@ -238,9 +243,11 @@ Vagrant.configure("2") do |config|
     }
 
     # Only execute the Ansible provisioner once, when all the machines are up and ready.
+    # And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
     if i == $num_instances
       node.vm.provision "ansible" do |ansible|
         ansible.playbook = $playbook
+        ansible.verbose = $ansible_verbosity
         $ansible_inventory_path = File.join( $inventory, "hosts.ini")
         if File.exist?($ansible_inventory_path)
           ansible.inventory_path = $ansible_inventory_path
@@ -250,7 +257,9 @@ Vagrant.configure("2") do |config|
         ansible.host_key_checking = false
         ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
         ansible.host_vars = host_vars
-        #ansible.tags = ['download']
+        if $ansible_tags != ""
+          ansible.tags = [$ansible_tags]
+        end
         ansible.groups = {
           "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
           "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
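The new `$ansible_tags` hook reads `VAGRANT_ANSIBLE_TAGS` from the environment and passes it to the provisioner, which allows re-running only a tagged subset of tasks. A sketch, assuming an already-created vagrant cluster:

```ShellSession
# Re-run only download-tagged tasks instead of the full playbook
VAGRANT_ANSIBLE_TAGS=download vagrant provision
```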
ansible.cfg

@@ -14,7 +14,7 @@ fact_caching_timeout = 7200
 stdout_callback = default
 display_skipped_hosts = no
 library = ./library
-callback_whitelist = profile_tasks
+callback_whitelist = profile_tasks,ara_default
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
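`ara_default` only records runs when the ara package is importable; otherwise Ansible skips the whitelisted callback. A sketch of wiring it up (the `ara.setup.callback_plugins` helper comes from the ara package itself, not from kubespray):

```ShellSession
pip3 install ara
export ANSIBLE_CALLBACK_PLUGINS="$(python3 -m ara.setup.callback_plugins)"
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
```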
cluster.yml

@@ -5,7 +5,7 @@
   vars:
     minimal_ansible_version: 2.9.0
     minimal_ansible_version_2_10: 2.10.11
-    maximal_ansible_version: 2.12.0
+    maximal_ansible_version: 2.13.0
     ansible_connection: local
   tags: always
   tasks:
@@ -46,7 +46,7 @@
   vars:
     etcd_cluster_setup: true
     etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
-  when: not etcd_kubeadm_enabled| default(false)
+  when: etcd_deployment_type != "kubeadm"
 
 - hosts: k8s_cluster
   gather_facts: False
@@ -59,7 +59,7 @@
   vars:
     etcd_cluster_setup: false
     etcd_events_cluster_setup: false
-  when: not etcd_kubeadm_enabled| default(false)
+  when: etcd_deployment_type != "kubeadm"
 
 - hosts: k8s_cluster
   gather_facts: False
@@ -118,7 +118,8 @@
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
     - { role: kubernetes-apps, tags: apps }
 
-- hosts: k8s_cluster
+- name: Apply resolv.conf changes now that cluster DNS is up
+  hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
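The boolean `etcd_kubeadm_enabled` toggle is folded into `etcd_deployment_type`, which now accepts `kubeadm` alongside the existing deployment types. A sketch of opting in (the group_vars path follows the sample inventory layout and is an assumption):

```ShellSession
echo 'etcd_deployment_type: kubeadm' >> inventory/mycluster/group_vars/all/etcd.yml
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
```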
contrib/azurerm/README.md

@@ -47,6 +47,10 @@ If you need to delete all resources from a resource group, simply call:
 
 **WARNING** this really deletes everything from your resource group, including everything that was later created by you!
 
+## Installing Ansible and the dependencies
+
+Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
+
 ## Generating an inventory for kubespray
 
 After you have applied the templates, you can generate an inventory with this call:
@@ -59,6 +63,5 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
 
 ```shell
 cd kubespray-root-dir
-sudo pip3 install -r requirements.txt
 ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
 ```
contrib/inventory_builder/inventory.py

@@ -83,11 +83,15 @@ class KubesprayInventory(object):
         self.config_file = config_file
         self.yaml_config = {}
         loadPreviousConfig = False
+        printHostnames = False
         # See whether there are any commands to process
         if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
             if changed_hosts[0] == "add":
                 loadPreviousConfig = True
                 changed_hosts = changed_hosts[1:]
+            elif changed_hosts[0] == "print_hostnames":
+                loadPreviousConfig = True
+                printHostnames = True
             else:
                 self.parse_command(changed_hosts[0], changed_hosts[1:])
                 sys.exit(0)
@@ -105,6 +109,10 @@ class KubesprayInventory(object):
             print(e)
             sys.exit(1)
 
+        if printHostnames:
+            self.print_hostnames()
+            sys.exit(0)
+
         self.ensure_required_groups(ROLES)
 
         if changed_hosts:
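The new `print_hostnames` command loads a previously generated config and prints its hostnames. A sketch, assuming the builder's usual `CONFIG_FILE` environment variable points at an existing inventory:

```ShellSession
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py print_hostnames
# expected output, matching the unit test below: node1 node2
```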
contrib/inventory_builder/tests/test_inventory.py

@@ -13,6 +13,7 @@
 # under the License.
 
 import inventory
+from test import support
 import unittest
 from unittest import mock
 
@@ -26,6 +27,28 @@ if path not in sys.path:
 import inventory  # noqa
 
 
+class TestInventoryPrintHostnames(unittest.TestCase):
+
+    @mock.patch('ruamel.yaml.YAML.load')
+    def test_print_hostnames(self, load_mock):
+        mock_io = mock.mock_open(read_data='')
+        load_mock.return_value = OrderedDict({'all': {'hosts': {
+            'node1': {'ansible_host': '10.90.0.2',
+                      'ip': '10.90.0.2',
+                      'access_ip': '10.90.0.2'},
+            'node2': {'ansible_host': '10.90.0.3',
+                      'ip': '10.90.0.3',
+                      'access_ip': '10.90.0.3'}}}})
+        with mock.patch('builtins.open', mock_io):
+            with self.assertRaises(SystemExit) as cm:
+                with support.captured_stdout() as stdout:
+                    inventory.KubesprayInventory(
+                        changed_hosts=["print_hostnames"],
+                        config_file="file")
+        self.assertEqual("node1 node2\n", stdout.getvalue())
+        self.assertEqual(cm.exception.code, 0)
+
+
 class TestInventory(unittest.TestCase):
     @mock.patch('inventory.sys')
     def setUp(self, sys_mock):
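A sketch of running these tests locally, mirroring the `tox-inventory-builder` CI job (the tox environment layout is an assumption):

```ShellSession
pip3 install tox
cd contrib/inventory_builder
tox
```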
@@ -28,7 +28,7 @@
   sysctl:
     name: net.ipv4.ip_forward
     value: 1
-    sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
+    sysctl_file: "{{ sysctl_file_path }}"
     state: present
     reload: yes
 
@@ -37,7 +37,7 @@
     name: "{{ item }}"
     state: present
     value: 0
-    sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
+    sysctl_file: "{{ sysctl_file_path }}"
     reload: yes
   with_items:
     - net.bridge.bridge-nf-call-arptables
mitogen.yml

@@ -5,8 +5,8 @@
 - hosts: localhost
   strategy: linear
   vars:
-    mitogen_version: 0.3.0rc1
-    mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
+    mitogen_version: 0.3.2
+    mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
     ansible_connection: local
   tasks:
     - name: Create mitogen plugin dir
@@ -38,7 +38,12 @@
     - name: add strategy to ansible.cfg
       ini_file:
         path: ansible.cfg
-        section: defaults
-        option: strategy
-        value: mitogen_linear
         mode: 0644
+        section: "{{ item.section | d('defaults') }}"
+        option: "{{ item.option }}"
+        value: "{{ item.value }}"
+      with_items:
+        - option: strategy
+          value: mitogen_linear
+        - option: strategy_plugins
+          value: plugins/mitogen/ansible_mitogen/plugins/strategy
||||
template:
|
||||
src: "{{ item.file }}"
|
||||
dest: "{{ kube_config_dir }}/{{ item.dest }}"
|
||||
mode: 0644
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
|
||||
contrib/offline/README.md

@@ -9,7 +9,8 @@ This script has two features:
 (2) Deploy local container registry and register the container images to the registry.
 
 Step(1) should be done online site as a preparation, then we bring the gotten images
-to the target offline environment.
+to the target offline environment. if images are from a private registry,
+you need to set `PRIVATE_REGISTRY` environment variable.
 Then we will run step(2) for registering the images to local registry.
 
 Step(1) can be operated with:
@@ -28,16 +29,19 @@ manage-offline-container-images.sh register
 
 This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.
 
-Run this script will generates three files, all downloaded files url in files.list, all container images in images.list, all component version in generate.sh.
+Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
+all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.
 
 ```shell
-bash generate_list.sh
+./generate_list.sh
 tree temp
 temp
 ├── files.list
-├── generate.sh
-└── images.list
-0 directories, 3 files
+├── files.list.template
+├── images.list
+└── images.list.template
+0 directories, 5 files
 ```
 
-In some cases you may want to update some component version, you can edit `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update file.list or run `bash generate.sh | grep -v 'https'> images.list` to update images.list.
+In some cases you may want to update some component version, you can declare version variables in ansible inventory file or group_vars,
+then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
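A hedged sketch of that override flow: version variables (names must match those in `roles/download/defaults/main.yml`) go into a small inventory that `-i` hands to the underlying ansible-playbook run:

```ShellSession
cat > /tmp/offline-versions.yml << EOF
all:
  hosts:
    localhost:
  vars:
    etcd_version: v3.5.3
EOF
./generate_list.sh -i /tmp/offline-versions.yml
```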
contrib/offline/generate_list.sh (56 changes, Normal file → Executable file)

@@ -5,53 +5,29 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
 TEMP_DIR="${CURRENT_DIR}/temp"
 REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
 
-: ${IMAGE_ARCH:="amd64"}
-: ${ANSIBLE_SYSTEM:="linux"}
-: ${ANSIBLE_ARCHITECTURE:="x86_64"}
 : ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
-: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}
 
 mkdir -p ${TEMP_DIR}
 
-# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
-if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi
-
-cat > ${TEMP_DIR}/generate.sh << EOF
-arch=${ARCH}
-image_arch=${IMAGE_ARCH}
-ansible_system=${ANSIBLE_SYSTEM}
-ansible_architecture=${ANSIBLE_ARCHITECTURE}
-EOF
-
-# generate all component version by $DOWNLOAD_YML
-grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
-    | sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
-grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
-    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
-sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
-sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh
 
-# generate all download files url
+# generate all download files url template
 grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
-    | sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh
+    | sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template
 
-# generate all images list
-grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
-    | sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
-    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
+# generate all images list template
 sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
-    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
-    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh
+    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
+    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
 
-# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
-sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
-sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh
-
-# add kube-* images to images list
+# add kube-* images to images list template
 # Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
 # doesn't contain those images. That is reason why here needs to put those images into the
 # list separately.
 KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
-echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
-    echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh
+for i in $KUBE_IMAGES; do
+    echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
+done
 
-# print files.list and images.list
-bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
-bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list
+# run ansible to expand templates
+/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}
+
+(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1
contrib/offline/generate_list.yml (new file, 19 lines)

@@ -0,0 +1,19 @@
+---
+- hosts: localhost
+  become: no
+
+  roles:
+    # Just load default variables from roles.
+    - role: kubespray-defaults
+      when: false
+    - role: download
+      when: false
+
+  tasks:
+    # Generate files.list and images.list files from templates.
+    - template:
+        src: ./contrib/offline/temp/{{ item }}.list.template
+        dest: ./contrib/offline/temp/{{ item }}.list
+      with_items:
+        - files
+        - images
contrib/offline/manage-offline-container-images.sh

@@ -54,7 +54,8 @@ function create_container_image_tar() {
         if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
            [ "${FIRST_PART}" = "gcr.io" ] ||
            [ "${FIRST_PART}" = "docker.io" ] ||
-           [ "${FIRST_PART}" = "quay.io" ]; then
+           [ "${FIRST_PART}" = "quay.io" ] ||
+           [ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
             image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
         fi
         echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
@@ -152,7 +153,8 @@ else
     echo "(2) Deploy local container registry and register the container images to the registry."
     echo ""
     echo "Step(1) should be done online site as a preparation, then we bring"
-    echo "the gotten images to the target offline environment."
+    echo "the gotten images to the target offline environment. if images are from"
+    echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
     echo "Then we will run step(2) for registering the images to local registry."
     echo ""
     echo "${IMAGE_TAR_FILE} is created to contain your container images."
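A sketch of the new variable in use; the registry host is an assumption:

```ShellSession
# Online side: strip the private registry prefix while creating the tarball
PRIVATE_REGISTRY=registry.example.com:5000 ./manage-offline-container-images.sh create

# Offline side: load the tarball and push into the local registry
./manage-offline-container-images.sh register
```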
contrib/terraform/aws/create-infrastructure.tf

@@ -20,20 +20,20 @@ module "aws-vpc" {
 
   aws_cluster_name = var.aws_cluster_name
   aws_vpc_cidr_block = var.aws_vpc_cidr_block
-  aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
+  aws_avail_zones = data.aws_availability_zones.available.names
   aws_cidr_subnets_private = var.aws_cidr_subnets_private
   aws_cidr_subnets_public = var.aws_cidr_subnets_public
   default_tags = var.default_tags
 }
 
-module "aws-elb" {
-  source = "./modules/elb"
+module "aws-nlb" {
+  source = "./modules/nlb"
 
   aws_cluster_name = var.aws_cluster_name
   aws_vpc_id = module.aws-vpc.aws_vpc_id
-  aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
+  aws_avail_zones = data.aws_availability_zones.available.names
   aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
-  aws_elb_api_port = var.aws_elb_api_port
+  aws_nlb_api_port = var.aws_nlb_api_port
   k8s_secure_api_port = var.k8s_secure_api_port
   default_tags = var.default_tags
 }
@@ -54,7 +54,6 @@ resource "aws_instance" "bastion-server" {
   instance_type = var.aws_bastion_size
   count = var.aws_bastion_num
   associate_public_ip_address = true
-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
   subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -79,8 +78,7 @@ resource "aws_instance" "k8s-master" {
 
   count = var.aws_kube_master_num
 
-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
-  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
+  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -98,10 +96,10 @@ resource "aws_instance" "k8s-master" {
   }))
 }
 
-resource "aws_elb_attachment" "attach_master_nodes" {
-  count = var.aws_kube_master_num
-  elb = module.aws-elb.aws_elb_api_id
-  instance = element(aws_instance.k8s-master.*.id, count.index)
+resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" {
+  count = var.aws_kube_master_num
+  target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
+  target_id = element(aws_instance.k8s-master.*.private_ip, count.index)
 }
 
 resource "aws_instance" "k8s-etcd" {
@@ -110,8 +108,7 @@ resource "aws_instance" "k8s-etcd" {
 
   count = var.aws_etcd_num
 
-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
-  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
+  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -134,8 +131,7 @@ resource "aws_instance" "k8s-worker" {
 
   count = var.aws_kube_worker_num
 
-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
-  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
+  subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -168,7 +164,7 @@ data "template_file" "inventory" {
     list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
     connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
     list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
-    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
+    nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
   }
 }
contrib/terraform/aws/modules/elb/main.tf (deleted)

@@ -1,57 +0,0 @@
-resource "aws_security_group" "aws-elb" {
-  name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
-  vpc_id = var.aws_vpc_id
-
-  tags = merge(var.default_tags, tomap({
-    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
-  }))
-}
-
-resource "aws_security_group_rule" "aws-allow-api-access" {
-  type = "ingress"
-  from_port = var.aws_elb_api_port
-  to_port = var.k8s_secure_api_port
-  protocol = "TCP"
-  cidr_blocks = ["0.0.0.0/0"]
-  security_group_id = aws_security_group.aws-elb.id
-}
-
-resource "aws_security_group_rule" "aws-allow-api-egress" {
-  type = "egress"
-  from_port = 0
-  to_port = 65535
-  protocol = "TCP"
-  cidr_blocks = ["0.0.0.0/0"]
-  security_group_id = aws_security_group.aws-elb.id
-}
-
-# Create a new AWS ELB for K8S API
-resource "aws_elb" "aws-elb-api" {
-  name = "kubernetes-elb-${var.aws_cluster_name}"
-  subnets = var.aws_subnet_ids_public
-  security_groups = [aws_security_group.aws-elb.id]
-
-  listener {
-    instance_port = var.k8s_secure_api_port
-    instance_protocol = "tcp"
-    lb_port = var.aws_elb_api_port
-    lb_protocol = "tcp"
-  }
-
-  health_check {
-    healthy_threshold = 2
-    unhealthy_threshold = 2
-    timeout = 3
-    target = "HTTPS:${var.k8s_secure_api_port}/healthz"
-    interval = 30
-  }
-
-  cross_zone_load_balancing = true
-  idle_timeout = 400
-  connection_draining = true
-  connection_draining_timeout = 400
-
-  tags = merge(var.default_tags, tomap({
-    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
-  }))
-}
contrib/terraform/aws/modules/elb/outputs.tf (deleted)

@@ -1,7 +0,0 @@
-output "aws_elb_api_id" {
-  value = aws_elb.aws-elb-api.id
-}
-
-output "aws_elb_api_fqdn" {
-  value = aws_elb.aws-elb-api.dns_name
-}
contrib/terraform/aws/modules/nlb/main.tf (new file, 41 lines)

@@ -0,0 +1,41 @@
+# Create a new AWS NLB for K8S API
+resource "aws_lb" "aws-nlb-api" {
+  name = "kubernetes-nlb-${var.aws_cluster_name}"
+  load_balancer_type = "network"
+  subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
+  idle_timeout = 400
+  enable_cross_zone_load_balancing = true
+
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
+  }))
+}
+
+# Create a new AWS NLB Instance Target Group
+resource "aws_lb_target_group" "aws-nlb-api-tg" {
+  name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
+  port = var.k8s_secure_api_port
+  protocol = "TCP"
+  target_type = "ip"
+  vpc_id = var.aws_vpc_id
+
+  health_check {
+    healthy_threshold = 2
+    unhealthy_threshold = 2
+    interval = 30
+    protocol = "HTTPS"
+    path = "/healthz"
+  }
+}
+
+# Create a new AWS NLB Listener listen to target group
+resource "aws_lb_listener" "aws-nlb-api-listener" {
+  load_balancer_arn = aws_lb.aws-nlb-api.arn
+  port = var.aws_nlb_api_port
+  protocol = "TCP"
+
+  default_action {
+    type = "forward"
+    target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
+  }
+}
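Existing tfvars files need the renamed variable after the ELB to NLB switch. A sketch of validating a plan with the new name (other required variables still apply; this is not a complete invocation):

```ShellSession
cd contrib/terraform/aws
terraform init
terraform plan -var aws_nlb_api_port=6443
```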
contrib/terraform/aws/modules/nlb/outputs.tf (new file, 11 lines)

@@ -0,0 +1,11 @@
+output "aws_nlb_api_id" {
+  value = aws_lb.aws-nlb-api.id
+}
+
+output "aws_nlb_api_fqdn" {
+  value = aws_lb.aws-nlb-api.dns_name
+}
+
+output "aws_nlb_api_tg_arn" {
+  value = aws_lb_target_group.aws-nlb-api-tg.arn
+}
@@ -6,8 +6,8 @@ variable "aws_vpc_id" {
   description = "AWS VPC ID"
 }
 
-variable "aws_elb_api_port" {
-  description = "Port for AWS ELB"
+variable "aws_nlb_api_port" {
+  description = "Port for AWS NLB"
 }
 
 variable "k8s_secure_api_port" {
@@ -25,13 +25,14 @@ resource "aws_internet_gateway" "cluster-vpc-internetgw" {
 
 resource "aws_subnet" "cluster-vpc-subnets-public" {
   vpc_id            = aws_vpc.cluster-vpc.id
-  count             = length(var.aws_avail_zones)
-  availability_zone = element(var.aws_avail_zones, count.index)
+  count             = length(var.aws_cidr_subnets_public)
+  availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
   cidr_block        = element(var.aws_cidr_subnets_public, count.index)
 
   tags = merge(var.default_tags, tomap({
     Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
-    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
+    "kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
+    "kubernetes.io/role/elb"                        = "1"
   }))
 }
 
@@ -43,12 +44,14 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
 
 resource "aws_subnet" "cluster-vpc-subnets-private" {
   vpc_id            = aws_vpc.cluster-vpc.id
-  count             = length(var.aws_avail_zones)
-  availability_zone = element(var.aws_avail_zones, count.index)
+  count             = length(var.aws_cidr_subnets_private)
+  availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
   cidr_block        = element(var.aws_cidr_subnets_private, count.index)
 
   tags = merge(var.default_tags, tomap({
     Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
+    "kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
+    "kubernetes.io/role/internal-elb"               = "1"
   }))
 }
 
@@ -14,8 +14,8 @@ output "etcd" {
   value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
 }
 
-output "aws_elb_api_fqdn" {
-  value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
+output "aws_nlb_api_fqdn" {
+  value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
 }
 
 output "inventory" {
@@ -33,9 +33,9 @@ aws_kube_worker_size = "t2.medium"
 
 aws_kube_worker_disk_size = 50
 
-#Settings AWS ELB
-aws_elb_api_port = 6443
+#Settings AWS NLB
+aws_nlb_api_port = 6443
 k8s_secure_api_port = 6443
 
@@ -24,4 +24,4 @@ kube_control_plane
 calico_rr
 
 [k8s_cluster:vars]
-${elb_api_fqdn}
+${nlb_api_fqdn}
@@ -32,7 +32,7 @@ aws_kube_worker_size = "t3.medium"
 aws_kube_worker_disk_size = 50
 
 #Settings AWS ELB
-aws_elb_api_port = 6443
+aws_nlb_api_port = 6443
 k8s_secure_api_port = 6443
 
 default_tags = {
@@ -25,7 +25,7 @@ aws_kube_worker_size = "t3.medium"
 aws_kube_worker_disk_size = 50
 
 #Settings AWS ELB
-aws_elb_api_port = 6443
+aws_nlb_api_port = 6443
 k8s_secure_api_port = 6443
 
 default_tags = { }
@@ -104,11 +104,11 @@ variable "aws_kube_worker_size" {
 }
 
 /*
-* AWS ELB Settings
+* AWS NLB Settings
 *
 */
-variable "aws_elb_api_port" {
-  description = "Port for AWS ELB"
+variable "aws_nlb_api_port" {
+  description = "Port for AWS NLB"
 }
 
 variable "k8s_secure_api_port" {
@@ -74,14 +74,23 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
 * `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
 * `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
 * `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
+* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
 
 ### Optional
 
 * `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
-* `master_sa_email`: Service account email to use for the master nodes *(Defaults to `""`, auto generate one)*
-* `master_sa_scopes`: Service account scopes to use for the master nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
+* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)*
+* `master_sa_scopes`: Service account scopes to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
+* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
+  for the control plane nodes *(Defaults to `false`)*
+* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
+  for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)*
 * `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
 * `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
+* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
+  for the worker nodes *(Defaults to `false`)*
+* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
+  for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)*
 
 An example variables file can be found in `tfvars.json`; a minimal HCL sketch of the new options follows below.
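To make the new knobs concrete, here is a minimal, hypothetical tfvars fragment in HCL form (this provider's sample file uses the JSON variant, `tfvars.json`); the values below are illustrative only, not defaults from this change:

```hcl
# Illustrative values only -- the documented defaults are false, "pd-ssd", and ["0.0.0.0/0"].
ingress_whitelist = ["0.0.0.0/0"]

master_preemptible          = false
master_additional_disk_type = "pd-ssd"

worker_preemptible          = true
worker_additional_disk_type = "pd-standard"
```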
@@ -1,8 +1,16 @@
+terraform {
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = "~> 4.0"
+    }
+  }
+}
+
 provider "google" {
   credentials = file(var.keyfile_location)
   region      = var.region
   project     = var.gcp_project_id
-  version     = "~> 3.48"
 }
 
 module "kubernetes" {
@@ -13,12 +21,17 @@ module "kubernetes" {
   machines    = var.machines
   ssh_pub_key = var.ssh_pub_key
 
-  master_sa_email  = var.master_sa_email
-  master_sa_scopes = var.master_sa_scopes
-  worker_sa_email  = var.worker_sa_email
-  worker_sa_scopes = var.worker_sa_scopes
+  master_sa_email             = var.master_sa_email
+  master_sa_scopes            = var.master_sa_scopes
+  master_preemptible          = var.master_preemptible
+  master_additional_disk_type = var.master_additional_disk_type
+  worker_sa_email             = var.worker_sa_email
+  worker_sa_scopes            = var.worker_sa_scopes
+  worker_preemptible          = var.worker_preemptible
+  worker_additional_disk_type = var.worker_additional_disk_type
 
   ssh_whitelist        = var.ssh_whitelist
   api_server_whitelist = var.api_server_whitelist
   nodeport_whitelist   = var.nodeport_whitelist
+  ingress_whitelist    = var.ingress_whitelist
 }
 
@@ -5,6 +5,8 @@
 
 resource "google_compute_network" "main" {
   name = "${var.prefix}-network"
+
+  auto_create_subnetworks = false
 }
 
 resource "google_compute_subnetwork" "main" {
@@ -20,6 +22,8 @@ resource "google_compute_firewall" "deny_all" {
 
   priority = 1000
 
+  source_ranges = ["0.0.0.0/0"]
+
   deny {
     protocol = "all"
   }
@@ -39,6 +43,8 @@ resource "google_compute_firewall" "allow_internal" {
 }
 
 resource "google_compute_firewall" "ssh" {
+  count = length(var.ssh_whitelist) > 0 ? 1 : 0
+
   name    = "${var.prefix}-ssh-firewall"
   network = google_compute_network.main.name
 
@@ -53,6 +59,8 @@ resource "google_compute_firewall" "ssh" {
 }
 
 resource "google_compute_firewall" "api_server" {
+  count = length(var.api_server_whitelist) > 0 ? 1 : 0
+
   name    = "${var.prefix}-api-server-firewall"
   network = google_compute_network.main.name
 
@@ -67,6 +75,8 @@ resource "google_compute_firewall" "api_server" {
 }
 
 resource "google_compute_firewall" "nodeport" {
+  count = length(var.nodeport_whitelist) > 0 ? 1 : 0
+
   name    = "${var.prefix}-nodeport-firewall"
   network = google_compute_network.main.name
 
@@ -81,11 +91,15 @@ resource "google_compute_firewall" "nodeport" {
 }
 
 resource "google_compute_firewall" "ingress_http" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name    = "${var.prefix}-http-ingress-firewall"
   network = google_compute_network.main.name
 
   priority = 100
 
+  source_ranges = var.ingress_whitelist
+
   allow {
     protocol = "tcp"
     ports    = ["80"]
@@ -93,11 +107,15 @@ resource "google_compute_firewall" "ingress_http" {
 }
 
 resource "google_compute_firewall" "ingress_https" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name    = "${var.prefix}-https-ingress-firewall"
   network = google_compute_network.main.name
 
   priority = 100
 
+  source_ranges = var.ingress_whitelist
+
   allow {
     protocol = "tcp"
     ports    = ["443"]
@@ -173,7 +191,7 @@ resource "google_compute_disk" "master" {
   }
 
   name = "${var.prefix}-${each.key}"
-  type = "pd-ssd"
+  type = var.master_additional_disk_type
   zone = each.value.machine.zone
   size = each.value.disk_size
 
@@ -229,19 +247,28 @@ resource "google_compute_instance" "master" {
 
   # Since we use google_compute_attached_disk we need to ignore this
   lifecycle {
-    ignore_changes = ["attached_disk"]
+    ignore_changes = [attached_disk]
   }
+
+  scheduling {
+    preemptible       = var.master_preemptible
+    automatic_restart = !var.master_preemptible
+  }
 }
 
 resource "google_compute_forwarding_rule" "master_lb" {
+  count = length(var.api_server_whitelist) > 0 ? 1 : 0
+
   name = "${var.prefix}-master-lb-forward-rule"
 
   port_range = "6443"
 
-  target = google_compute_target_pool.master_lb.id
+  target = google_compute_target_pool.master_lb[count.index].id
 }
 
 resource "google_compute_target_pool" "master_lb" {
+  count = length(var.api_server_whitelist) > 0 ? 1 : 0
+
   name      = "${var.prefix}-master-lb-pool"
   instances = local.master_target_list
 }
@@ -258,7 +285,7 @@ resource "google_compute_disk" "worker" {
   }
 
   name = "${var.prefix}-${each.key}"
-  type = "pd-ssd"
+  type = var.worker_additional_disk_type
   zone = each.value.machine.zone
   size = each.value.disk_size
 
@@ -326,35 +353,48 @@ resource "google_compute_instance" "worker" {
 
   # Since we use google_compute_attached_disk we need to ignore this
   lifecycle {
-    ignore_changes = ["attached_disk"]
+    ignore_changes = [attached_disk]
   }
+
+  scheduling {
+    preemptible       = var.worker_preemptible
+    automatic_restart = !var.worker_preemptible
+  }
 }
 
 resource "google_compute_address" "worker_lb" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name         = "${var.prefix}-worker-lb-address"
   address_type = "EXTERNAL"
   region       = var.region
 }
 
 resource "google_compute_forwarding_rule" "worker_http_lb" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name = "${var.prefix}-worker-http-lb-forward-rule"
 
-  ip_address = google_compute_address.worker_lb.address
+  ip_address = google_compute_address.worker_lb[count.index].address
   port_range = "80"
 
-  target = google_compute_target_pool.worker_lb.id
+  target = google_compute_target_pool.worker_lb[count.index].id
 }
 
 resource "google_compute_forwarding_rule" "worker_https_lb" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name = "${var.prefix}-worker-https-lb-forward-rule"
 
-  ip_address = google_compute_address.worker_lb.address
+  ip_address = google_compute_address.worker_lb[count.index].address
   port_range = "443"
 
-  target = google_compute_target_pool.worker_lb.id
+  target = google_compute_target_pool.worker_lb[count.index].id
 }
 
 resource "google_compute_target_pool" "worker_lb" {
+  count = length(var.ingress_whitelist) > 0 ? 1 : 0
+
   name      = "${var.prefix}-worker-lb-pool"
   instances = local.worker_target_list
 }
 
@@ -19,9 +19,9 @@ output "worker_ip_addresses" {
 }
 
 output "ingress_controller_lb_ip_address" {
-  value = google_compute_address.worker_lb.address
+  value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : ""
 }
 
 output "control_plane_lb_ip_address" {
-  value = google_compute_forwarding_rule.master_lb.ip_address
+  value = length(var.api_server_whitelist) > 0 ? google_compute_forwarding_rule.master_lb.0.ip_address : ""
 }
 
@@ -27,6 +27,14 @@ variable "master_sa_scopes" {
   type = list(string)
 }
 
+variable "master_preemptible" {
+  type = bool
+}
+
+variable "master_additional_disk_type" {
+  type = string
+}
+
 variable "worker_sa_email" {
   type = string
 }
@@ -35,6 +43,14 @@ variable "worker_sa_scopes" {
   type = list(string)
 }
 
+variable "worker_preemptible" {
+  type = bool
+}
+
+variable "worker_additional_disk_type" {
+  type = string
+}
+
 variable "ssh_pub_key" {}
 
 variable "ssh_whitelist" {
@@ -49,6 +65,11 @@ variable "nodeport_whitelist" {
   type = list(string)
 }
 
+variable "ingress_whitelist" {
+  type    = list(string)
+  default = ["0.0.0.0/0"]
+}
+
 variable "private_network_cidr" {
   default = "10.0.10.0/24"
 }
 
@@ -16,6 +16,9 @@
   "nodeport_whitelist": [
     "1.2.3.4/32"
   ],
+  "ingress_whitelist": [
+    "0.0.0.0/0"
+  ],
 
   "machines": {
     "master-0": {
@@ -24,7 +27,7 @@
       "zone": "us-central1-a",
       "additional_disks": {},
       "boot_disk": {
-        "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
+        "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
         "size": 50
       }
     },
@@ -38,7 +41,7 @@
       }
     },
     "boot_disk": {
-      "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
+      "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
       "size": 50
     }
   },
@@ -52,7 +55,7 @@
       }
     },
     "boot_disk": {
-      "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
+      "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
       "size": 50
     }
   }
 
@@ -44,6 +44,16 @@ variable "master_sa_scopes" {
   default = ["https://www.googleapis.com/auth/cloud-platform"]
 }
 
+variable "master_preemptible" {
+  type    = bool
+  default = false
+}
+
+variable "master_additional_disk_type" {
+  type    = string
+  default = "pd-ssd"
+}
+
 variable "worker_sa_email" {
   type    = string
   default = ""
@@ -54,6 +64,16 @@ variable "worker_sa_scopes" {
   default = ["https://www.googleapis.com/auth/cloud-platform"]
 }
 
+variable "worker_preemptible" {
+  type    = bool
+  default = false
+}
+
+variable "worker_additional_disk_type" {
+  type    = string
+  default = "pd-ssd"
+}
+
 variable ssh_pub_key {
   description = "Path to public SSH key file which is injected into the VMs."
   type        = string
@@ -70,3 +90,8 @@ variable api_server_whitelist {
 variable nodeport_whitelist {
   type = list(string)
 }
+
+variable "ingress_whitelist" {
+  type    = list(string)
+  default = ["0.0.0.0/0"]
+}
 
@@ -97,6 +97,7 @@ terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
 
 * `prefix`: Prefix to add to all resources, if set to "" don't set any prefix
 * `ssh_public_keys`: List of public SSH keys to install on all machines
 * `zone`: The zone where to run the cluster
+* `network_zone`: The network zone where the cluster is running
 * `machines`: Machines to provision. Key of this object will be used as the name of the machine
   * `node_type`: The role of this node *(master|worker)*
   * `size`: Size of the VM
 
@@ -1,6 +1,6 @@
 prefix = "default"
 zone   = "hel1"
+network_zone   = "eu-central"
 inventory_file = "inventory.ini"
 
 ssh_public_keys = [
@@ -10,6 +10,7 @@ module "kubernetes" {
   machines = var.machines
 
   ssh_public_keys = var.ssh_public_keys
+  network_zone    = var.network_zone
 
   ssh_whitelist        = var.ssh_whitelist
   api_server_whitelist = var.api_server_whitelist
@@ -34,9 +35,9 @@ data "template_file" "inventory" {
       keys(module.kubernetes.worker_ip_addresses),
       values(module.kubernetes.worker_ip_addresses).*.public_ip,
       values(module.kubernetes.worker_ip_addresses).*.private_ip))
 
     list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
     list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
+    network_id  = module.kubernetes.network_id
   }
 }
 
@@ -6,7 +6,7 @@ resource "hcloud_network" "kubernetes" {
 resource "hcloud_network_subnet" "kubernetes" {
   type         = "cloud"
   network_id   = hcloud_network.kubernetes.id
-  network_zone = "eu-central"
+  network_zone = var.network_zone
   ip_range     = var.private_subnet_cidr
 }
 
@@ -21,3 +21,7 @@ output "worker_ip_addresses" {
 output "cluster_private_network_cidr" {
   value = var.private_subnet_cidr
 }
+
+output "network_id" {
+  value = hcloud_network.kubernetes.id
+}
@@ -39,3 +39,6 @@ variable "private_network_cidr" {
 variable "private_subnet_cidr" {
   default = "10.0.10.0/24"
 }
+variable "network_zone" {
+  default = "eu-central"
+}
@@ -14,3 +14,6 @@ ${list_worker}
 
 [k8s-cluster:children]
 kube-master
 kube-node
+
+[k8s-cluster:vars]
+network_id=${network_id}
@@ -1,6 +1,10 @@
 variable "zone" {
   description = "The zone where to run the cluster"
 }
+
+variable "network_zone" {
+  description = "The network zone where the cluster is running"
+  default     = "eu-central"
+}
 
 variable "prefix" {
   description = "Prefix for resource names"
 
@@ -35,7 +35,7 @@ now six total etcd replicas.
 
 ## Requirements
 
 - [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
-- Install dependencies: `sudo pip install -r requirements.txt`
+- [Install Ansible dependencies](/docs/ansible.md#installing-ansible)
 - Account with Equinix Metal
 - An SSH key pair
 
@@ -60,9 +60,9 @@ Terraform will be used to provision all of the Equinix Metal resources with base
 Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
 
 ```ShellSession
-cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
+cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER
 cd inventory/$CLUSTER
-ln -s ../../contrib/terraform/packet/hosts
+ln -s ../../contrib/terraform/metal/hosts
 ```
 
 This will be the base for subsequent Terraform commands.
@@ -101,7 +101,7 @@ This helps when identifying which hosts are associated with each cluster.
 
 While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values (see the sketch after this list):
 
 - cluster_name = the name of the inventory directory created above as $CLUSTER
-- packet_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
+- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
 
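A hypothetical `cluster.tfvars` fragment with those two values set (the project ID below is a placeholder, not a real Equinix Metal project):

```hcl
# cluster_name should match the inventory directory created as $CLUSTER.
cluster_name     = "mycluster"
# Placeholder UUID -- substitute your own Equinix Metal project ID.
metal_project_id = "00000000-0000-0000-0000-000000000000"
```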
#### Enable localhost access
 
@@ -119,7 +119,7 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr
 
 In the cluster's inventory folder, the following files might be created (either by Terraform
 or manually); to prevent you from pushing them accidentally, they are listed in a
-`.gitignore` file in the `terraform/packet` directory:
+`.gitignore` file in the `terraform/metal` directory:
 
 - `.terraform`
 - `.tfvars`
@@ -135,7 +135,7 @@ plugins. This is accomplished as follows:
 
 ```ShellSession
 cd inventory/$CLUSTER
-terraform init ../../contrib/terraform/packet
+terraform init ../../contrib/terraform/metal
 ```
 
 This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
 
@@ -146,7 +146,7 @@ You can apply the Terraform configuration to your cluster with the following com
 issued from your cluster's inventory directory (`inventory/$CLUSTER`):
 
 ```ShellSession
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
+terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal
 export ANSIBLE_HOST_KEY_CHECKING=False
 ansible-playbook -i hosts ../../cluster.yml
 ```
 
@@ -156,7 +156,7 @@ ansible-playbook -i hosts ../../cluster.yml
 You can destroy your new cluster with the following command issued from the cluster's inventory directory:
 
 ```ShellSession
-terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
+terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal
 ```
 
 If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -1,16 +1,15 @@
 # Configure the Equinix Metal Provider
-provider "packet" {
-  version = "~> 2.0"
+provider "metal" {
 }
 
-resource "packet_ssh_key" "k8s" {
+resource "metal_ssh_key" "k8s" {
   count      = var.public_key_path != "" ? 1 : 0
   name       = "kubernetes-${var.cluster_name}"
   public_key = chomp(file(var.public_key_path))
 }
 
-resource "packet_device" "k8s_master" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_master" {
+  depends_on = [metal_ssh_key.k8s]
 
   count    = var.number_of_k8s_masters
   hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
@@ -18,12 +17,12 @@ resource "packet_device" "k8s_master" {
   facilities       = [var.facility]
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
   tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }
 
-resource "packet_device" "k8s_master_no_etcd" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_master_no_etcd" {
+  depends_on = [metal_ssh_key.k8s]
 
   count    = var.number_of_k8s_masters_no_etcd
   hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
@@ -31,12 +30,12 @@ resource "packet_device" "k8s_master_no_etcd" {
   facilities       = [var.facility]
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
   tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }
 
-resource "packet_device" "k8s_etcd" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_etcd" {
+  depends_on = [metal_ssh_key.k8s]
 
   count    = var.number_of_etcd
   hostname = "${var.cluster_name}-etcd-${count.index + 1}"
@@ -44,12 +43,12 @@ resource "packet_device" "k8s_etcd" {
   facilities       = [var.facility]
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
   tags             = ["cluster-${var.cluster_name}", "etcd"]
 }
 
-resource "packet_device" "k8s_node" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_node" {
+  depends_on = [metal_ssh_key.k8s]
 
   count    = var.number_of_k8s_nodes
   hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
@@ -57,7 +56,7 @@ resource "packet_device" "k8s_node" {
   facilities       = [var.facility]
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
   tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }
 
contrib/terraform/metal/output.tf (new file, 16 lines)
@@ -0,0 +1,16 @@
+output "k8s_masters" {
+  value = metal_device.k8s_master.*.access_public_ipv4
+}
+
+output "k8s_masters_no_etc" {
+  value = metal_device.k8s_master_no_etcd.*.access_public_ipv4
+}
+
+output "k8s_etcds" {
+  value = metal_device.k8s_etcd.*.access_public_ipv4
+}
+
+output "k8s_nodes" {
+  value = metal_device.k8s_node.*.access_public_ipv4
+}
@@ -2,7 +2,7 @@
 cluster_name = "mycluster"
 
 # Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
-packet_project_id = "Example-API-Token"
+metal_project_id = "Example-API-Token"
 
 # The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned
 # leave this value blank if the public key is already setup in the Equinix Metal project
@@ -2,12 +2,12 @@ variable "cluster_name" {
   default = "kubespray"
 }
 
-variable "packet_project_id" {
+variable "metal_project_id" {
   description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
 }
 
 variable "operating_system" {
-  default = "ubuntu_16_04"
+  default = "ubuntu_20_04"
 }
 
 variable "public_key_path" {
@@ -24,23 +24,23 @@ variable "facility" {
 }
 
 variable "plan_k8s_masters" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
 }
 
 variable "plan_k8s_masters_no_etcd" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
 }
 
 variable "plan_etcd" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
 }
 
 variable "plan_k8s_nodes" {
-  default = "c2.medium.x86"
+  default = "c3.medium.x86"
 }
 
 variable "number_of_k8s_masters" {
-  default = 0
+  default = 1
 }
 
 variable "number_of_k8s_masters_no_etcd" {
@@ -52,6 +52,6 @@ variable "number_of_etcd" {
 }
 
 variable "number_of_k8s_nodes" {
-  default = 0
+  default = 1
 }
 
@@ -2,8 +2,8 @@
 terraform {
   required_version = ">= 0.12"
   required_providers {
-    packet = {
-      source = "terraform-providers/packet"
+    metal = {
+      source = "equinix/metal"
     }
   }
 }
@@ -17,9 +17,10 @@ most modern installs of OpenStack that support the basic services.
 - [ELASTX](https://elastx.se/)
 - [EnterCloudSuite](https://www.entercloudsuite.com/)
 - [FugaCloud](https://fuga.cloud/)
-- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
+- [Open Telekom Cloud](https://cloud.telekom.de/)
 - [OVH](https://www.ovh.com/)
+- [Rackspace](https://www.rackspace.com/)
 - [Safespring](https://www.safespring.com)
 - [Ultimum](https://ultimum.io/)
 - [VexxHost](https://vexxhost.com/)
 - [Zetta](https://www.zetta.io/)
@@ -247,6 +248,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
 |`az_list` | List of Availability Zones available in your OpenStack cluster. |
 |`network_name` | The name to be given to the internal network that will be generated |
+|`use_existing_network` | Use an existing network with the name of `network_name`. `false` by default |
 |`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
 |`dns_nameservers` | An array of DNS name server names to be used by hosts in the internal subnet. |
 |`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
@@ -271,7 +273,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
 |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
-|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
 |`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
 |`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
 |`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
@@ -283,7 +284,10 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
 |`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
 |`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0, private IPs will be used instead. Default value is 1. |
+|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
+|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
 |`k8s_nodes` | Map containing worker node definitions, see explanation below |
+|`k8s_masters` | Map containing master node definitions, see the explanation for `k8s_nodes`, `sample-inventory/cluster.tfvars`, and the sketch below |
 
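As a rough sketch of how the options added in this table fit together, a hypothetical `cluster.tfvars` fragment might look as follows. The `k8s_masters` entry shape (`az`, `flavor`, `etcd`, `floating_ip`) mirrors the fields the compute module reads later in this diff; the flavor UUID and availability zone name are placeholders:

```hcl
# Reuse a pre-existing Neutron network instead of creating one.
use_existing_network = true
network_name         = "my-existing-network"

# Disable port security on the generated ports.
port_security_enabled    = false
force_null_port_security = false

# Explicit master definitions; per this diff, only consumed when all
# number_of_k8s_masters* counters are 0.
k8s_masters = {
  "master-1" = {
    az          = "nova"                                 # placeholder availability zone
    flavor      = "00000000-0000-0000-0000-000000000000" # placeholder flavor UUID
    etcd        = true
    floating_ip = true
  }
}
```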
##### k8s_nodes
 
@@ -411,18 +415,39 @@ plugins. This is accomplished as follows:
 
 ```ShellSession
 cd inventory/$CLUSTER
-terraform init ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" init
 ```
 
 This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
 
+### Customizing with cloud-init
+
+You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
+One common template is used for all instances. Adjust the file shown below:
+`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
+For example, to enable openstack novnc access and ansible_user=root SSH access:
+
+```yaml
+#cloud-config
+## in some cases novnc console access is required
+## it requires ssh password to be set
+ssh_pwauth: yes
+chpasswd:
+  list: |
+    root:secret
+  expire: False
+
+## in some cases direct root ssh access via ssh key is required
+disable_root: false
+```
+
 ### Provisioning cluster
 
 You can apply the Terraform configuration to your cluster with the following command
 issued from your cluster's inventory directory (`inventory/$CLUSTER`):
 
 ```ShellSession
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
 ```
 
 If you chose to create a bastion host, this script will create
@@ -437,7 +462,7 @@ pick it up automatically.
 
 You can destroy your new cluster with the following command issued from the cluster's inventory directory:
 
 ```ShellSession
-terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
 ```
 
 If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -1,14 +1,15 @@
 module "network" {
   source = "./modules/network"
 
-  external_net       = var.external_net
-  network_name       = var.network_name
-  subnet_cidr        = var.subnet_cidr
-  cluster_name       = var.cluster_name
-  dns_nameservers    = var.dns_nameservers
-  network_dns_domain = var.network_dns_domain
-  use_neutron        = var.use_neutron
-  router_id          = var.router_id
+  external_net          = var.external_net
+  network_name          = var.network_name
+  subnet_cidr           = var.subnet_cidr
+  cluster_name          = var.cluster_name
+  dns_nameservers       = var.dns_nameservers
+  network_dns_domain    = var.network_dns_domain
+  use_neutron           = var.use_neutron
+  port_security_enabled = var.port_security_enabled
+  router_id             = var.router_id
 }
 
 module "ips" {
@@ -23,6 +24,7 @@ module "ips" {
   network_name            = var.network_name
   router_id               = module.network.router_id
   k8s_nodes               = var.k8s_nodes
+  k8s_masters             = var.k8s_masters
   k8s_master_fips         = var.k8s_master_fips
   bastion_fips            = var.bastion_fips
   router_internal_port_id = module.network.router_internal_port_id
@@ -43,6 +45,7 @@ module "compute" {
   number_of_bastions                 = var.number_of_bastions
   number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
   number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
+  k8s_masters                        = var.k8s_masters
   k8s_nodes                          = var.k8s_nodes
   bastion_root_volume_size_in_gb     = var.bastion_root_volume_size_in_gb
   etcd_root_volume_size_in_gb        = var.etcd_root_volume_size_in_gb
@@ -69,6 +72,7 @@ module "compute" {
   flavor_bastion          = var.flavor_bastion
   k8s_master_fips         = module.ips.k8s_master_fips
   k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips
+  k8s_masters_fips        = module.ips.k8s_masters_fips
   k8s_node_fips           = module.ips.k8s_node_fips
   k8s_nodes_fips          = module.ips.k8s_nodes_fips
   bastion_fips            = module.ips.bastion_fips
@@ -80,7 +84,6 @@ module "compute" {
   supplementary_node_groups  = var.supplementary_node_groups
   master_allowed_ports       = var.master_allowed_ports
   worker_allowed_ports       = var.worker_allowed_ports
-  wait_for_floatingip        = var.wait_for_floatingip
   use_access_ip              = var.use_access_ip
   master_server_group_policy = var.master_server_group_policy
   node_server_group_policy   = var.node_server_group_policy
@@ -88,8 +91,11 @@ module "compute" {
   extra_sec_groups      = var.extra_sec_groups
   extra_sec_groups_name = var.extra_sec_groups_name
   group_vars_path       = var.group_vars_path
+  port_security_enabled    = var.port_security_enabled
+  force_null_port_security = var.force_null_port_security
 
-  network_id = module.network.router_id
+  network_router_id    = module.network.router_id
+  network_id           = module.network.network_id
+  use_existing_network = var.use_existing_network
 }
 
 output "private_subnet_id" {
 
@@ -15,6 +15,15 @@ data "openstack_images_image_v2" "image_master" {
   name = var.image_master == "" ? var.image : var.image_master
 }
 
+data "template_file" "cloudinit" {
+  template = file("${path.module}/templates/cloudinit.yaml")
+}
+
+data "openstack_networking_network_v2" "k8s_network" {
+  count = var.use_existing_network ? 1 : 0
+  name  = var.network_name
+}
+
 resource "openstack_compute_keypair_v2" "k8s" {
   name       = "kubernetes-${var.cluster_name}"
   public_key = chomp(file(var.public_key_path))
@@ -150,16 +159,25 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
 locals {
   # master groups
   master_sec_groups = compact([
-    openstack_networking_secgroup_v2.k8s_master.name,
-    openstack_networking_secgroup_v2.k8s.name,
-    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
+    openstack_networking_secgroup_v2.k8s_master.id,
+    openstack_networking_secgroup_v2.k8s.id,
+    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].id : "",
   ])
   # worker groups
   worker_sec_groups = compact([
-    openstack_networking_secgroup_v2.k8s.name,
-    openstack_networking_secgroup_v2.worker.name,
-    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
+    openstack_networking_secgroup_v2.k8s.id,
+    openstack_networking_secgroup_v2.worker.id,
+    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "",
   ])
+  # bastion groups
+  bastion_sec_groups = compact(concat([
+    openstack_networking_secgroup_v2.k8s.id,
+    openstack_networking_secgroup_v2.bastion[0].id,
+  ]))
+  # etcd groups
+  etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
+  # glusterfs groups
+  gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
 
   # Image uuid
   image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
@@ -169,12 +187,27 @@ locals {
   image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
 }
 
+resource "openstack_networking_port_v2" "bastion_port" {
+  count                 = var.number_of_bastions
+  name                  = "${var.cluster_name}-bastion-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.bastion_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "bastion" {
   name      = "${var.cluster_name}-bastion-${count.index + 1}"
   count     = var.number_of_bastions
   image_id  = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
   flavor_id = var.flavor_bastion
   key_pair  = openstack_compute_keypair_v2.k8s.name
+  user_data = data.template_file.cloudinit.rendered
 
   dynamic "block_device" {
     for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
@@ -189,25 +222,35 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
   }
 
-  security_groups = [openstack_networking_secgroup_v2.k8s.name,
-    element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
-  ]
-
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "bastion"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > ${var.group_vars_path}/no_floating.yml"
+    command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_port" {
+  count                 = var.number_of_k8s_masters
+  name                  = "${var.cluster_name}-k8s-master-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "k8s_master" {
   name       = "${var.cluster_name}-k8s-master-${count.index + 1}"
   count      = var.number_of_k8s_masters
@@ -215,6 +258,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   image_id   = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
   flavor_id  = var.flavor_k8s_master
   key_pair   = openstack_compute_keypair_v2.k8s.name
+  user_data  = data.template_file.cloudinit.rendered
 
   dynamic "block_device" {
@@ -231,11 +275,9 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
   }
 
-  security_groups = local.master_sec_groups
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -246,15 +288,87 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
+    command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_masters_port" {
+  for_each              = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
+  name                  = "${var.cluster_name}-k8s-${each.key}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
+resource "openstack_compute_instance_v2" "k8s_masters" {
+  for_each          = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
+  name              = "${var.cluster_name}-k8s-${each.key}"
+  availability_zone = each.value.az
+  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
+  flavor_id         = each.value.flavor
+  key_pair          = openstack_compute_keypair_v2.k8s.name
+
+  dynamic "block_device" {
+    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
+    content {
+      uuid                  = local.image_to_use_master
+      source_type           = "image"
+      volume_size           = var.master_root_volume_size_in_gb
+      volume_type           = var.master_volume_type
+      boot_index            = 0
+      destination_type      = "volume"
+      delete_on_termination = true
+    }
+  }
+
+  network {
+    port = openstack_networking_port_v2.k8s_masters_port[each.key].id
+  }
+
+  dynamic "scheduler_hints" {
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = openstack_compute_servergroup_v2.k8s_master[0].id
+    }
+  }
+
+  metadata = {
+    ssh_user         = var.ssh_user
+    kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}"
+    depends_on       = var.network_router_id
+    use_access_ip    = var.use_access_ip
+  }
+
+  provisioner "local-exec" {
+    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
  }
+}
+
+resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
+  count                 = var.number_of_k8s_masters_no_etcd
+  name                  = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   name       = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
   count      = var.number_of_k8s_masters_no_etcd
@@ -262,6 +376,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   image_id   = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
   flavor_id  = var.flavor_k8s_master
   key_pair   = openstack_compute_keypair_v2.k8s.name
+  user_data  = data.template_file.cloudinit.rendered
 
   dynamic "block_device" {
@@ -278,11 +393,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
   }
 
-  security_groups = local.master_sec_groups
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -293,15 +406,29 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
+    command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
   }
 }
 
+resource "openstack_networking_port_v2" "etcd_port" {
+  count                 = var.number_of_etcd
+  name                  = "${var.cluster_name}-etcd-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.etcd_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "etcd" {
   name       = "${var.cluster_name}-etcd-${count.index + 1}"
   count      = var.number_of_etcd
@@ -309,6 +436,7 @@ resource "openstack_compute_instance_v2" "etcd" {
   image_id   = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
   flavor_id  = var.flavor_etcd
   key_pair   = openstack_compute_keypair_v2.k8s.name
+  user_data  = data.template_file.cloudinit.rendered
 
   dynamic "block_device" {
     for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@@ -323,13 +451,11 @@ resource "openstack_compute_instance_v2" "etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.etcd_port.*.id, count.index)
   }
 
-  security_groups = [openstack_networking_secgroup_v2.k8s.name]
-
   dynamic "scheduler_hints" {
-    for_each = var.etcd_server_group_policy ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_etcd[0].id
     }
@@ -338,11 +464,25 @@ resource "openstack_compute_instance_v2" "etcd" {
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "etcd,no_floating"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
+  count                 = var.number_of_k8s_masters_no_floating_ip
+  name                  = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   name       = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
   count      = var.number_of_k8s_masters_no_floating_ip
@@ -365,11 +505,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index)
   }
 
-  security_groups = local.master_sec_groups
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -380,11 +518,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
+  count                 = var.number_of_k8s_masters_no_floating_ip_no_etcd
+  name                  = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   name       = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
   count      = var.number_of_k8s_masters_no_floating_ip_no_etcd
@@ -392,6 +544,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   image_id   = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
   flavor_id  = var.flavor_k8s_master
   key_pair   = openstack_compute_keypair_v2.k8s.name
+  user_data  = data.template_file.cloudinit.rendered
 
   dynamic "block_device" {
     for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@@ -407,11 +560,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index)
   }
 
-  security_groups = local.master_sec_groups
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -422,11 +573,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
-    depends_on       = var.network_id
+    depends_on       = var.network_router_id
     use_access_ip    = var.use_access_ip
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_node_port" {
+  count                 = var.number_of_k8s_nodes
+  name                  = "${var.cluster_name}-k8s-node-${count.index + 1}"
+  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
+  admin_state_up        = "true"
+  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+
+  depends_on = [
+    var.network_router_id
+  ]
+}
+
 resource "openstack_compute_instance_v2" "k8s_node" {
   name       = "${var.cluster_name}-k8s-node-${count.index + 1}"
   count      = var.number_of_k8s_nodes
@@ -434,6 +599,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -449,10 +615,9 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
@@ -464,15 +629,29 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
@@ -480,6 +659,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -495,11 +675,9 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
@@ -510,11 +688,25 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
@@ -522,6 +714,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -537,11 +730,9 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
@@ -552,15 +743,29 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
@@ -582,11 +787,9 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
@@ -597,44 +800,46 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user_gfs
|
||||
kubespray_groups = "gfs-cluster,network-storage,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "bastion" {
|
||||
count = var.number_of_bastions
|
||||
floating_ip = var.bastion_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.bastion.*.id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
|
||||
count = var.number_of_k8s_masters
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
|
||||
floating_ip = var.k8s_master_fips[count.index]
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_masters_fips[each.key].address
|
||||
port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_node" {
|
||||
count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
|
||||
floating_ip = var.k8s_node_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_nodes_fips[each.key].address
|
||||
instance_id = openstack_compute_instance_v2.k8s_nodes[each.key].id
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
|
||||
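The hunks above swap the compute-API association resources for their networking-API counterparts, which bind a floating IP to a port rather than to an instance, so `instance_id` and `wait_until_associated` disappear. A minimal sketch of the new shape (the "example" names and the address are placeholders, not part of this module):

# Sketch only; resource names and address are hypothetical.
resource "openstack_networking_floatingip_associate_v2" "example" {
  floating_ip = "198.51.100.10"
  port_id     = openstack_networking_port_v2.example_port.id
}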
@@ -0,0 +1,17 @@
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
# list: |
# root:secret
# expire: False

## in some cases direct root ssh access via ssh key is required
#disable_root: false

## in some cases additional CA certs are required
#ca-certs:
# trusted: |
# -----BEGIN CERTIFICATE-----
@@ -68,6 +68,14 @@ variable "network_id" {
default = ""
}

variable "use_existing_network" {
type = bool
}

variable "network_router_id" {
default = ""
}

variable "k8s_master_fips" {
type = list
}
@@ -80,6 +88,10 @@ variable "k8s_node_fips" {
type = list
}

variable "k8s_masters_fips" {
type = map
}

variable "k8s_nodes_fips" {
type = map
}
@@ -104,9 +116,9 @@ variable "k8s_allowed_egress_ips" {
type = list
}

variable "k8s_nodes" {}
variable "k8s_masters" {}

variable "wait_for_floatingip" {}
variable "k8s_nodes" {}

variable "supplementary_master_groups" {
default = ""
@@ -165,3 +177,11 @@ variable "image_master_uuid" {
variable "group_vars_path" {
type = string
}

variable "port_security_enabled" {
type = bool
}

variable "force_null_port_security" {
type = bool
}
@@ -14,6 +14,12 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
depends_on = [null_resource.dummy_dependency]
}

resource "openstack_networking_floatingip_v2" "k8s_masters" {
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}

# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
@@ -3,6 +3,10 @@ output "k8s_master_fips" {
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
}

output "k8s_masters_fips" {
value = openstack_networking_floatingip_v2.k8s_masters
}

# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_no_etcd_fips" {
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
@@ -16,6 +16,8 @@ variable "router_id" {
default = ""
}

variable "k8s_masters" {}

variable "k8s_nodes" {}

variable "k8s_master_fips" {}
@@ -11,10 +11,11 @@ data "openstack_networking_router_v2" "k8s" {
}

resource "openstack_networking_network_v2" "k8s" {
name = var.network_name
count = var.use_neutron
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
admin_state_up = "true"
name = var.network_name
count = var.use_neutron
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
admin_state_up = "true"
port_security_enabled = var.port_security_enabled
}

resource "openstack_networking_subnet_v2" "k8s" {
@@ -2,6 +2,10 @@ output "router_id" {
value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}"
}

output "network_id" {
value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0)
}

output "router_internal_port_id" {
value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)
}
@@ -10,6 +10,10 @@ variable "dns_nameservers" {
type = list
}

variable "port_security_enabled" {
type = bool
}

variable "subnet_cidr" {}

variable "use_neutron" {}
@@ -32,6 +32,28 @@ number_of_k8s_masters_no_floating_ip_no_etcd = 0

flavor_k8s_master = "<UUID>"

k8s_masters = {
# "master-1" = {
# "az" = "nova"
# "flavor" = "<UUID>"
# "floating_ip" = true
# "etcd" = true
# },
# "master-2" = {
# "az" = "nova"
# "flavor" = "<UUID>"
# "floating_ip" = false
# "etcd" = true
# },
# "master-3" = {
# "az" = "nova"
# "flavor" = "<UUID>"
# "floating_ip" = true
# "etcd" = true
# },
}


# nodes
number_of_k8s_nodes = 2

@@ -52,6 +74,9 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = "<network>"

# Use an existing network with the name of network_name. Set to false to create a network with the name of network_name.
# use_existing_network = true

external_net = "<UUID>"

subnet_cidr = "<cidr>"
@@ -59,3 +84,6 @@ subnet_cidr = "<cidr>"
floatingip_pool = "<pool>"

bastion_allowed_remote_ips = ["0.0.0.0/0"]

# Force port security to be null. Some cloud providers do not allow to set port security.
# force_null_port_security = false
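The commented toggles above map to the variables introduced in this change; set explicitly, they might look like this (a sketch with illustrative values, not recommendations):

# Hypothetical overrides for clouds that reject port security on ports.
use_existing_network     = true
port_security_enabled    = false
force_null_port_security = true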
@@ -137,6 +137,12 @@ variable "network_name" {
default = "internal"
}

variable "use_existing_network" {
description = "Use an existing network"
type = bool
default = "false"
}

variable "network_dns_domain" {
description = "dns_domain for the internal network"
type = string
@@ -148,6 +154,18 @@ variable "use_neutron" {
default = 1
}

variable "port_security_enabled" {
description = "Enable port security on the internal network"
type = bool
default = "true"
}

variable "force_null_port_security" {
description = "Force port security to be null. Some providers do not allow setting port security"
type = bool
default = "false"
}

variable "subnet_cidr" {
description = "Subnet CIDR block."
type = string
@@ -268,6 +286,10 @@ variable "router_internal_port_id" {
default = null
}

variable "k8s_masters" {
default = {}
}

variable "k8s_nodes" {
default = {}
}
@@ -1,16 +0,0 @@
output "k8s_masters" {
value = packet_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
value = packet_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
value = packet_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
value = packet_device.k8s_node.*.access_public_ipv4
}
@@ -114,10 +114,10 @@ def iterhosts(resources):


def iterips(resources):
'''yield ip tuples of (instance_id, ip)'''
'''yield ip tuples of (port_id, ip)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
if resource_type == 'openstack_compute_floatingip_associate_v2':
if resource_type == 'openstack_networking_floatingip_associate_v2':
yield openstack_floating_ips(resource)


@@ -195,8 +195,8 @@ def parse_bool(string_form):
raise ValueError('could not convert %r to a bool' % string_form)


@parses('packet_device')
def packet_device(resource, tfvars=None):
@parses('metal_device')
def metal_device(resource, tfvars=None):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['hostname']
groups = []
@@ -213,14 +213,14 @@ def packet_device(resource, tfvars=None):
'state': raw_attrs['state'],
# ansible
'ansible_ssh_host': raw_attrs['network.0.address'],
'ansible_ssh_user': 'root', # Use root by default in packet
'ansible_ssh_user': 'root', # Use root by default in metal
# generic
'ipv4_address': raw_attrs['network.0.address'],
'public_ipv4': raw_attrs['network.0.address'],
'ipv6_address': raw_attrs['network.1.address'],
'public_ipv6': raw_attrs['network.1.address'],
'private_ipv4': raw_attrs['network.2.address'],
'provider': 'packet',
'provider': 'metal',
}

if raw_attrs['operating_system'] == 'flatcar_stable':
@@ -228,10 +228,10 @@ def packet_device(resource, tfvars=None):
attrs.update({'ansible_ssh_user': 'core'})

# add groups based on attrs
groups.append('packet_operating_system=' + attrs['operating_system'])
groups.append('packet_locked=%s' % attrs['locked'])
groups.append('packet_state=' + attrs['state'])
groups.append('packet_plan=' + attrs['plan'])
groups.append('metal_operating_system=' + attrs['operating_system'])
groups.append('metal_locked=%s' % attrs['locked'])
groups.append('metal_state=' + attrs['state'])
groups.append('metal_plan=' + attrs['plan'])

# groups specific to kubespray
groups = groups + attrs['tags']
@@ -243,13 +243,13 @@ def openstack_floating_ips(resource):
raw_attrs = resource['primary']['attributes']
attrs = {
'ip': raw_attrs['floating_ip'],
'instance_id': raw_attrs['instance_id'],
'port_id': raw_attrs['port_id'],
}
return attrs

def openstack_floating_ips(resource):
raw_attrs = resource['primary']['attributes']
return raw_attrs['instance_id'], raw_attrs['floating_ip']
return raw_attrs['port_id'], raw_attrs['floating_ip']

@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
@@ -282,6 +282,7 @@ def openstack_host(resource, module_name):
# generic
'public_ipv4': raw_attrs['access_ip_v4'],
'private_ipv4': raw_attrs['access_ip_v4'],
'port_id' : raw_attrs['network.0.port'],
'provider': 'openstack',
}

@@ -339,10 +340,10 @@ def openstack_host(resource, module_name):
def iter_host_ips(hosts, ips):
'''Update hosts that have an entry in the floating IP list'''
for host in hosts:
host_id = host[1]['id']
port_id = host[1]['port_id']

if host_id in ips:
ip = ips[host_id]
if port_id in ips:
ip = ips[port_id]

host[1].update({
'access_ip_v4': ip,
@@ -104,9 +104,22 @@ terraform destroy --var-file cluster-settings.tfvars \
* `zone`: The zone in which to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `plan`: Preconfigured cpu/mem plan to use (disables `cpu` and `mem` attributes below)
  * `cpu`: number of cpu cores
  * `mem`: memory size in MB
  * `disk_size`: The size of the storage in GB
  * `additional_disks`: Additional disks to attach to the node.
    * `size`: The size of the additional disk in GB
    * `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm)
* `firewall_enabled`: Enable firewall rules
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access the API of masters
  * `start_address`: Start of address range to allow
  * `end_address`: End of address range to allow
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
  * `start_address`: Start of address range to allow
  * `end_address`: End of address range to allow
* `loadbalancer_enabled`: Enable managed load balancer
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
  * `port`: Port to load balance.
  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
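Assembled from the attributes documented above, a single machine entry looks roughly like the following sketch (sizes and the "worker-3" name are assumed; `plan` can replace the custom `cpu`/`mem` pair):

# Hypothetical worker definition following the documented schema.
machines = {
  "worker-3" : {
    "node_type" : "worker",
    "plan" : null,
    "cpu" : "2",
    "mem" : "4096",
    "disk_size" : 250,
    "additional_disks" : {}
  }
}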
@@ -20,6 +20,8 @@ ssh_public_keys = [
machines = {
"master-0" : {
"node_type" : "master",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -30,6 +32,8 @@ machines = {
},
"worker-0" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -49,6 +53,8 @@ machines = {
},
"worker-1" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -68,6 +74,8 @@ machines = {
},
"worker-2" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -86,3 +94,32 @@ machines = {
}
}
}

firewall_enabled = false

master_allowed_remote_ips = [
{
"start_address" : "0.0.0.0"
"end_address" : "255.255.255.255"
}
]

k8s_allowed_remote_ips = [
{
"start_address" : "0.0.0.0"
"end_address" : "255.255.255.255"
}
]

loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {
# "http" : {
# "port" : 80,
# "backend_servers" : [
# "worker-0",
# "worker-1",
# "worker-2"
# ]
# }
}
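To switch the balancer on, the flag is flipped and a frontend uncommented; a sketch reusing the commented names above:

# Hypothetical: forwards port 80 to the three workers defined earlier.
loadbalancer_enabled = true
loadbalancers = {
  "http" : {
    "port" : 80,
    "backend_servers" : ["worker-0", "worker-1", "worker-2"]
  }
}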
@@ -22,6 +22,14 @@ module "kubernetes" {
machines = var.machines

ssh_public_keys = var.ssh_public_keys

firewall_enabled = var.firewall_enabled
master_allowed_remote_ips = var.master_allowed_remote_ips
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips

loadbalancer_enabled = var.loadbalancer_enabled
loadbalancer_plan = var.loadbalancer_plan
loadbalancers = var.loadbalancers
}

#
@@ -10,6 +10,16 @@ locals {
]
])

lb_backend_servers = flatten([
for lb_name, loadbalancer in var.loadbalancers : [
for backend_server in loadbalancer.backend_servers : {
port = loadbalancer.port
lb_name = lb_name
server_name = backend_server
}
]
])

# If prefix is set, all resources will be prefixed with "${var.prefix}-"
# Else don't prefix with anything
resource-prefix = "%{ if var.prefix != ""}${var.prefix}-%{ endif }"
@@ -45,8 +55,9 @@ resource "upcloud_server" "master" {
}

hostname = "${local.resource-prefix}${each.key}"
cpu = each.value.cpu
mem = each.value.mem
plan = each.value.plan
cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? each.value.mem : null
zone = var.zone

template {
@@ -65,6 +76,13 @@ resource "upcloud_server" "master" {
network = upcloud_network.private.id
}

# Ignore volumes created by csi-driver
lifecycle {
ignore_changes = [storage_devices]
}

firewall = var.firewall_enabled

dynamic "storage_devices" {
for_each = {
for disk_key_name, disk in upcloud_storage.additional_disks :
@@ -94,8 +112,9 @@ resource "upcloud_server" "worker" {
}

hostname = "${local.resource-prefix}${each.key}"
cpu = each.value.cpu
mem = each.value.mem
plan = each.value.plan
cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? each.value.mem : null
zone = var.zone

template {
@@ -114,6 +133,13 @@ resource "upcloud_server" "worker" {
network = upcloud_network.private.id
}

# Ignore volumes created by csi-driver
lifecycle {
ignore_changes = [storage_devices]
}

firewall = var.firewall_enabled

dynamic "storage_devices" {
for_each = {
for disk_key_name, disk in upcloud_storage.additional_disks :
@@ -134,3 +160,151 @@ resource "upcloud_server" "worker" {
create_password = false
}
}

resource "upcloud_firewall_rules" "master" {
for_each = upcloud_server.master
server_id = each.value.id

dynamic firewall_rule {
for_each = var.master_allowed_remote_ips

content {
action = "accept"
comment = "Allow master API access from this network"
destination_port_end = "6443"
destination_port_start = "6443"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = firewall_rule.value.end_address
source_address_start = firewall_rule.value.start_address
}
}

dynamic firewall_rule {
for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : []

content {
action = "drop"
comment = "Deny master API access from other networks"
destination_port_end = "6443"
destination_port_start = "6443"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}

dynamic firewall_rule {
for_each = var.k8s_allowed_remote_ips

content {
action = "accept"
comment = "Allow SSH from this network"
destination_port_end = "22"
destination_port_start = "22"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = firewall_rule.value.end_address
source_address_start = firewall_rule.value.start_address
}
}

dynamic firewall_rule {
for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

content {
action = "drop"
comment = "Deny SSH from other networks"
destination_port_end = "22"
destination_port_start = "22"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}
}

resource "upcloud_firewall_rules" "k8s" {
for_each = upcloud_server.worker
server_id = each.value.id

dynamic firewall_rule {
for_each = var.k8s_allowed_remote_ips

content {
action = "accept"
comment = "Allow SSH from this network"
destination_port_end = "22"
destination_port_start = "22"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = firewall_rule.value.end_address
source_address_start = firewall_rule.value.start_address
}
}

dynamic firewall_rule {
for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

content {
action = "drop"
comment = "Deny SSH from other networks"
destination_port_end = "22"
destination_port_start = "22"
direction = "in"
family = "IPv4"
protocol = "tcp"
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}
}

resource "upcloud_loadbalancer" "lb" {
count = var.loadbalancer_enabled ? 1 : 0
configured_status = "started"
name = "${local.resource-prefix}lb"
plan = var.loadbalancer_plan
zone = var.zone
network = upcloud_network.private.id
}

resource "upcloud_loadbalancer_backend" "lb_backend" {
for_each = var.loadbalancer_enabled ? var.loadbalancers : {}

loadbalancer = upcloud_loadbalancer.lb[0].id
name = "lb-backend-${each.key}"
}

resource "upcloud_loadbalancer_frontend" "lb_frontend" {
for_each = var.loadbalancer_enabled ? var.loadbalancers : {}

loadbalancer = upcloud_loadbalancer.lb[0].id
name = "lb-frontend-${each.key}"
mode = "tcp"
port = each.value.port
default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
}

resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
for_each = {
for be_server in local.lb_backend_servers:
"${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server
if var.loadbalancer_enabled
}

backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id
name = "${local.resource-prefix}${each.key}"
ip = merge(upcloud_server.master, upcloud_server.worker)[each.value.server_name].network_interface[1].ip_address
port = each.value.port
weight = 100
max_sessions = var.loadbalancer_plan == "production-small" ? 50000 : 1000
enabled = true
}
@@ -18,3 +18,7 @@ output "worker_ip" {
}
}
}

output "loadbalancer_domain" {
value = var.loadbalancer_enabled ? upcloud_loadbalancer.lb[0].dns_name : null
}
@@ -16,6 +16,7 @@ variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
plan = string
cpu = string
mem = string
disk_size = number
@@ -29,3 +30,38 @@ variable "machines" {
variable "ssh_public_keys" {
type = list(string)
}

variable "firewall_enabled" {
type = bool
}

variable "master_allowed_remote_ips" {
type = list(object({
start_address = string
end_address = string
}))
}

variable "k8s_allowed_remote_ips" {
type = list(object({
start_address = string
end_address = string
}))
}

variable "loadbalancer_enabled" {
type = bool
}

variable "loadbalancer_plan" {
type = string
}

variable "loadbalancers" {
description = "Load balancers"

type = map(object({
port = number
backend_servers = list(string)
}))
}
@@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.0.0"
version = "~>2.4.0"
}
}
required_version = ">= 0.13"
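The `~>` pessimistic constraint bumped here pins the provider to the 2.4.x series; spelled out as an explicit range it is equivalent to:

# Equivalent long form of "~>2.4.0".
version = ">= 2.4.0, < 2.5.0"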
@@ -6,3 +6,7 @@ output "master_ip" {
output "worker_ip" {
value = module.kubernetes.worker_ip
}

output "loadbalancer_domain" {
value = module.kubernetes.loadbalancer_domain
}
@@ -20,6 +20,8 @@ ssh_public_keys = [
machines = {
"master-0" : {
"node_type" : "master",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -30,6 +32,8 @@ machines = {
},
"worker-0" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -49,6 +53,8 @@ machines = {
},
"worker-1" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -68,6 +74,8 @@ machines = {
},
"worker-2" : {
"node_type" : "worker",
# plan to use instead of custom cpu/mem
"plan" : null,
#number of cpu cores
"cpu" : "2",
#memory size in MB
@@ -86,3 +94,32 @@ machines = {
}
}
}

firewall_enabled = false

master_allowed_remote_ips = [
{
"start_address" : "0.0.0.0"
"end_address" : "255.255.255.255"
}
]

k8s_allowed_remote_ips = [
{
"start_address" : "0.0.0.0"
"end_address" : "255.255.255.255"
}
]

loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {
# "http" : {
# "port" : 80,
# "backend_servers" : [
# "worker-0",
# "worker-1",
# "worker-2"
# ]
# }
}
@@ -28,6 +28,7 @@ variable "machines" {

type = map(object({
node_type = string
plan = string
cpu = string
mem = string
disk_size = number
@@ -54,3 +55,46 @@ variable "UPCLOUD_USERNAME" {
variable "UPCLOUD_PASSWORD" {
description = "Password for UpCloud API user"
}

variable "firewall_enabled" {
description = "Enable firewall rules"
default = false
}

variable "master_allowed_remote_ips" {
description = "List of IP start/end addresses allowed to access API of masters"
type = list(object({
start_address = string
end_address = string
}))
default = []
}

variable "k8s_allowed_remote_ips" {
description = "List of IP start/end addresses allowed to SSH to hosts"
type = list(object({
start_address = string
end_address = string
}))
default = []
}

variable "loadbalancer_enabled" {
description = "Enable load balancer"
default = false
}

variable "loadbalancer_plan" {
description = "Load balancer plan (development/production-small)"
default = "development"
}

variable "loadbalancers" {
description = "Load balancers"

type = map(object({
port = number
backend_servers = list(string)
}))
default = {}
}
@@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.0.0"
version = "~>2.4.0"
}
}
required_version = ">= 0.13"
@@ -105,8 +105,7 @@ ansible-playbook -i inventory.ini ../../cluster.yml -b -v
* `vsphere_datacenter`: The identifier of vSphere data center
* `vsphere_compute_cluster`: The identifier of vSphere compute cluster
* `vsphere_datastore`: The identifier of vSphere data store
* `vsphere_server`: The address of vSphere server
* `vsphere_hostname`: The IP address of vSphere hostname
* `vsphere_server`: This is the vCenter server name or address for vSphere API operations.
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `template_name`: The name of a base image (the OVF template must be defined in vSphere beforehand)

@@ -125,5 +124,7 @@ ansible-playbook -i inventory.ini ../../cluster.yml -b -v
* `worker_cores`: The number of CPU cores for the worker nodes (default: 16)
* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192)
* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100)
* `vapp`: Boolean to set the template type to vapp. (Default: false)
* `interface_name`: Name of the interface to configure. (Default: ens192)

An example variables file can be found in `default.tfvars`
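Both new options are optional, with the defaults documented above; overriding them in a tfvars file is one line each (values assumed for illustration):

# Hypothetical overrides.
vapp           = true      # provision via vApp properties instead of guestinfo
interface_name = "ens224"  # NIC name inside the chosen template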
@@ -34,6 +34,5 @@ vsphere_datacenter = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname = "i-did-not-read-the-docs" # e.g. 192.168.0.2

template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg
@@ -23,11 +23,6 @@ data "vsphere_network" "network" {
datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_host" "host" {
name = var.vsphere_hostname
datacenter_id = data.vsphere_datacenter.dc.id
}

data "vsphere_virtual_machine" "template" {
name = var.template_name
datacenter_id = data.vsphere_datacenter.dc.id
@@ -40,7 +35,7 @@ data "vsphere_compute_cluster" "compute_cluster" {

resource "vsphere_resource_pool" "pool" {
name = "${var.prefix}-cluster-pool"
parent_resource_pool_id = data.vsphere_host.host.resource_pool_id
parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id
}

module "kubernetes" {
@@ -74,11 +69,13 @@ module "kubernetes" {
scsi_type = data.vsphere_virtual_machine.template.scsi_type
network_id = data.vsphere_network.network.id
adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0]
interface_name = var.interface_name
firmware = var.firmware
hardware_version = var.hardware_version
disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned

template_id = data.vsphere_virtual_machine.template.id
vapp = var.vapp

ssh_public_keys = var.ssh_public_keys
}
@@ -87,30 +84,17 @@ module "kubernetes" {
# Generate ansible inventory
#

data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")

vars = {
resource "local_file" "inventory" {
content = templatefile("${path.module}/templates/inventory.tpl", {
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip),
values(module.kubernetes.master_ip),
range(1, length(module.kubernetes.master_ip) + 1)))
range(1, length(module.kubernetes.master_ip) + 1))),
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s",
keys(module.kubernetes.worker_ip),
values(module.kubernetes.worker_ip)))
list_master = join("\n", formatlist("%s",
keys(module.kubernetes.master_ip)))
list_worker = join("\n", formatlist("%s",
keys(module.kubernetes.worker_ip)))
}
}

resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}

triggers = {
template = data.template_file.inventory.rendered
}
values(module.kubernetes.worker_ip))),
list_master = join("\n", formatlist("%s", keys(module.kubernetes.master_ip))),
list_worker = join("\n", formatlist("%s", keys(module.kubernetes.worker_ip)))
})
filename = var.inventory_file
}
@@ -46,15 +46,31 @@ resource "vsphere_virtual_machine" "worker" {
client_device = true
}

vapp {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
netmask = each.value.netmask,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys}))
dynamic "vapp" {
for_each = var.vapp ? [1] : []

content {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys }))
}
}
}

extra_config = {
"isolation.tools.copy.disable" = "FALSE"
"isolation.tools.paste.disable" = "FALSE"
"isolation.tools.setGUIOptions.enable" = "TRUE"
"guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys }))
"guestinfo.userdata.encoding" = "base64"
"guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}",
interface_name = var.interface_name
ip = each.value.ip,
netmask = each.value.netmask,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys }))
"guestinfo.metadata.encoding" = "base64"
}
}

resource "vsphere_virtual_machine" "master" {
@@ -105,13 +121,29 @@ resource "vsphere_virtual_machine" "master" {
client_device = true
}

vapp {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
netmask = each.value.netmask,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys}))
dynamic "vapp" {
for_each = var.vapp ? [1] : []

content {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys }))
}
}
}

extra_config = {
"isolation.tools.copy.disable" = "FALSE"
"isolation.tools.paste.disable" = "FALSE"
"isolation.tools.setGUIOptions.enable" = "TRUE"
"guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys }))
"guestinfo.userdata.encoding" = "base64"
"guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}",
interface_name = var.interface_name
ip = each.value.ip,
netmask = each.value.netmask,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys }))
"guestinfo.metadata.encoding" = "base64"
}
}
@@ -1,7 +1,7 @@
output "master_ip" {
value = {
for name, machine in var.machines :
name => machine.ip
"${var.prefix}-${name}" => machine.ip
if machine.node_type == "master"
}
}
@@ -9,8 +9,7 @@ output "master_ip" {
output "worker_ip" {
value = {
for name, machine in var.machines :
name => machine.ip
"${var.prefix}-${name}" => machine.ip
if machine.node_type == "worker"
}
}
@@ -0,0 +1,6 @@
#cloud-config

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}
@@ -0,0 +1,14 @@
instance-id: ${hostname}
local-hostname: ${hostname}
network:
  version: 2
  ethernets:
    ${interface_name}:
      match:
        name: ${interface_name}
      dhcp4: false
      addresses:
        - ${ip}/${netmask}
      gateway4: ${gw}
      nameservers:
        addresses: [${dns}]
@@ -6,23 +6,12 @@ ssh_authorized_keys:
%{ endfor ~}

write_files:
  - path: /etc/netplan/20-internal-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - 172.17.0.100/32
  - path: /etc/netplan/10-user-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          ens192:
          ${interface_name}:
            dhcp4: false #true to use dhcp
            addresses:
              - ${ip}/${netmask}
@@ -18,9 +18,13 @@ variable "datastore_id" {}
variable "guest_id" {}
variable "scsi_type" {}
variable "network_id" {}
variable "interface_name" {}
variable "adapter_type" {}
variable "disk_thin_provisioned" {}
variable "template_id" {}
variable "vapp" {
type = bool
}
variable "firmware" {}
variable "folder" {}
variable "ssh_public_keys" {
@@ -29,6 +29,5 @@ vsphere_datacenter = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname = "i-did-not-read-the-docs" # e.g. 192.168.0.2

template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg
@@ -27,8 +27,6 @@ variable "vsphere_password" {}

variable "vsphere_server" {}

variable "vsphere_hostname" {}

variable "ssh_public_keys" {
description = "List of public SSH keys which are injected into the VMs."
type = list(string)
@@ -37,6 +35,13 @@ variable "ssh_public_keys" {
variable "template_name" {}

# Optional variables (ones where reasonable defaults exist)
variable "vapp" {
default = false
}

variable "interface_name" {
default = "ens192"
}

variable "folder" {
default = ""
@@ -18,6 +18,7 @@
* [Weave](docs/weave.md)
* [Multus](docs/multus.md)
* Ingress
  * [kube-vip](docs/kube-vip.md)
  * [ALB Ingress](docs/ingress_controller/alb_ingress_controller.md)
  * [MetalLB](docs/metallb.md)
  * [Nginx Ingress](docs/ingress_controller/ingress_nginx.md)
@@ -33,10 +34,11 @@
* [Fedora CoreOS](docs/fcos.md)
* [OpenSUSE](docs/opensuse.md)
* [RedHat Enterprise Linux](docs/rhel.md)
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos8.md)
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
* [Amazon Linux 2](docs/amazonlinux.md)
* CRI
  * [Containerd](docs/containerd.md)
  * [Docker](docs/docker.md)
  * [CRI-O](docs/cri-o.md)
  * [Kata Containers](docs/kata-containers.md)
  * [gVisor](docs/gvisor.md)
Some files were not shown because too many files have changed in this diff.