Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)
Compare commits
361 Commits
8 .gitignore vendored
@@ -3,3 +3,11 @@
inventory/vagrant_ansible_inventory
temp
.idea
.tox
.cache
*.egg-info
*.pyc
*.pyo
*.tfstate
*.tfstate.backup
/ssh-bastion.conf
422 .gitlab-ci.yml Normal file
@@ -0,0 +1,422 @@
stages:
|
||||
- unit-tests
|
||||
- deploy-gce-part1
|
||||
- deploy-gce-part2
|
||||
- deploy-gce-special
|
||||
|
||||
variables:
|
||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||
# DOCKER_HOST: tcp://localhost:2375
|
||||
ANSIBLE_FORCE_COLOR: "true"
|
||||
|
||||
# asia-east1-a
|
||||
# asia-northeast1-a
|
||||
# europe-west1-b
|
||||
# us-central1-a
|
||||
# us-east1-b
|
||||
# us-west1-a
|
||||
|
||||
before_script:
|
||||
- pip install ansible
|
||||
- pip install netaddr
|
||||
- pip install apache-libcloud==0.20.1
|
||||
- pip install boto==2.9.0
|
||||
- mkdir -p /.ssh
|
||||
- cp tests/ansible.cfg .
|
||||
|
||||
.job: &job
|
||||
tags:
|
||||
- kubernetes
|
||||
- docker
|
||||
image: quay.io/ant31/kargo:master
|
||||
|
||||
.docker_service: &docker_service
|
||||
services:
|
||||
- docker:dind
|
||||
|
||||
.create_cluster: &create_cluster
|
||||
<<: *job
|
||||
<<: *docker_service
|
||||
|
||||
.gce_variables: &gce_variables
|
||||
GCE_USER: travis
|
||||
SSH_USER: $GCE_USER
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CONTAINER_ENGINE: docker
|
||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||
BOOTSTRAP_OS: none
|
||||
LOG_LEVEL: "-vv"
|
||||
|
||||
.gce: &gce
|
||||
<<: *job
|
||||
<<: *docker_service
|
||||
cache:
|
||||
key: "$CI_BUILD_REF_NAME"
|
||||
paths:
|
||||
- downloads/
|
||||
- $HOME/.cache
|
||||
stage: deploy-gce
|
||||
before_script:
|
||||
- docker info
|
||||
- pip install ansible==2.1.3.0
|
||||
- pip install netaddr
|
||||
- pip install apache-libcloud==0.20.1
|
||||
- pip install boto==2.9.0
|
||||
- mkdir -p /.ssh
|
||||
- cp tests/ansible.cfg .
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- ansible-playbook --version
|
||||
- cp tests/ansible.cfg .
|
||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
script:
|
||||
- pwd
|
||||
- ls
|
||||
- echo ${PWD}
|
||||
- >
|
||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Create cluster
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e download_run_once=true
|
||||
-e download_localhost=true
|
||||
-e deploy_netchecker=true
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
cluster.yml
|
||||
|
||||
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
|
||||
## Ping the between 2 pod
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
|
||||
## Advanced DNS checks
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
|
||||
after_script:
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Test matrix. Leave the comments for markup scripts.
|
||||
.coreos_calico_sep_variables: &coreos_calico_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: coreos-stable
|
||||
CLOUD_REGION: us-west1-b
|
||||
CLUSTER_MODE: separated
|
||||
BOOTSTRAP_OS: coreos
|
||||
|
||||
.debian8_canal_ha_variables: &debian8_canal_ha_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: debian-8-kubespray
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: ha
|
||||
|
||||
.rhel7_weave_variables: &rhel7_weave_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.centos7_flannel_variables: &centos7_flannel_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.debian8_calico_variables: &debian8_calico_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: debian-8-kubespray
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.coreos_canal_variables: &coreos_canal_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: coreos-stable
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: default
|
||||
BOOTSTRAP_OS: coreos
|
||||
|
||||
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: separated
|
||||
|
||||
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separated
|
||||
|
||||
.centos7_calico_ha_variables: &centos7_calico_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: ha
|
||||
|
||||
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: coreos-alpha
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLUSTER_MODE: ha
|
||||
BOOTSTRAP_OS: coreos
|
||||
|
||||
# Builds for PRs only (auto) and triggers (auto)
|
||||
coreos-calico-sep:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_sep_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
coreos-calico-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_sep_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos7-flannel:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
centos7-flannel-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
ubuntu-weave-sep:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_weave_sep_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
ubuntu-weave-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_weave_sep_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
# More builds for PRs/merges (manual) and triggers (auto)
|
||||
debian8-canal-ha:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *debian8_canal_ha_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
debian8-canal-ha-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *debian8_canal_ha_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
rhel7-weave:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *rhel7_weave_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
rhel7-weave-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *rhel7_weave_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
debian8-calico:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *debian8_calico_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
debian8-calico-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *debian8_calico_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
coreos-canal:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_canal_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
coreos-canal-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_canal_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
rhel7-canal-sep:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *rhel7_canal_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/,]
|
||||
|
||||
rhel7-canal-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *rhel7_canal_sep_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos7-calico-ha:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_calico_ha_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
centos7-calico-ha-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_calico_ha_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
||||
coreos-alpha-weave-ha:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_alpha_weave_ha_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
syntax-check:
|
||||
<<: *job
|
||||
stage: unit-tests
|
||||
script:
|
||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
||||
except: ['triggers']
|
||||
|
||||
tox-inventory-builder:
|
||||
stage: unit-tests
|
||||
<<: *job
|
||||
script:
|
||||
- pip install tox
|
||||
- cd contrib/inventory_builder && tox
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
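The `*-triggers` jobs above run only for pipelines started through GitLab's trigger API (`only: ['triggers']`), while the non-trigger variants run automatically for `pr-*` branches. A minimal sketch of such a trigger call, assuming GitLab's v4 trigger endpoint; host, project id, and token are placeholders:

```shell
# Illustrative only: start a triggered pipeline for the "*-triggers" jobs above.
curl --request POST \
  --form "token=$TRIGGER_TOKEN" \
  --form ref=master \
  "https://<gitlab-host>/api/v4/projects/<project-id>/trigger/pipeline"
```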
149 .travis.yml
@@ -1,149 +0,0 @@
sudo: false
|
||||
|
||||
git:
|
||||
depth: 5
|
||||
|
||||
env:
|
||||
global:
|
||||
GCE_USER=travis
|
||||
SSH_USER=$GCE_USER
|
||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
||||
CONTAINER_ENGINE=docker
|
||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
||||
matrix:
|
||||
# Debian Jessie
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=europe-west1-b
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=us-central1-c
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=us-east1-d
|
||||
|
||||
# Centos 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=centos-7-sudo
|
||||
CLOUD_REGION=asia-east1-c
|
||||
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=centos-7-sudo
|
||||
CLOUD_REGION=europe-west1-b
|
||||
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=centos-7-sudo
|
||||
CLOUD_REGION=us-central1-c
|
||||
|
||||
# Redhat 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=rhel-7-sudo
|
||||
CLOUD_REGION=us-east1-d
|
||||
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=rhel-7-sudo
|
||||
CLOUD_REGION=asia-east1-c
|
||||
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=rhel-7-sudo
|
||||
CLOUD_REGION=europe-west1-b
|
||||
|
||||
# Ubuntu 16.04
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||
CLOUD_REGION=us-central1-c
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||
CLOUD_REGION=us-east1-d
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||
CLOUD_REGION=asia-east1-c
|
||||
|
||||
# Ubuntu 15.10
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=ubuntu-1510-wily
|
||||
CLOUD_REGION=europe-west1-b
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=ubuntu-1510-wily
|
||||
CLOUD_REGION=us-central1-a
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=ubuntu-1510-wily
|
||||
CLOUD_REGION=us-east1-d
|
||||
|
||||
|
||||
before_install:
|
||||
# Install Ansible.
|
||||
- pip install --user boto -U
|
||||
- pip install --user ansible
|
||||
- pip install --user netaddr
|
||||
- pip install --user apache-libcloud
|
||||
|
||||
cache:
|
||||
- directories:
|
||||
- $HOME/.cache/pip
|
||||
- $HOME/.local
|
||||
|
||||
before_script:
|
||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
||||
- $HOME/.local/bin/ansible-playbook --version
|
||||
- cp tests/ansible.cfg .
|
||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
||||
## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
|
||||
# - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml
|
||||
|
||||
script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Create cluster
|
||||
- "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
## Create a POD
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
|
||||
## Ping the between 2 pod
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
|
||||
# - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml
|
||||
|
||||
after_script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
161 .travis.yml.bak Normal file
@@ -0,0 +1,161 @@
sudo: required
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
git:
|
||||
depth: 5
|
||||
|
||||
env:
|
||||
global:
|
||||
GCE_USER=travis
|
||||
SSH_USER=$GCE_USER
|
||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
||||
CONTAINER_ENGINE=docker
|
||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
||||
GS_ACCESS_KEY_ID=$GS_KEY
|
||||
GS_SECRET_ACCESS_KEY=$GS_SECRET
|
||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
||||
CLUSTER_MODE=default
|
||||
BOOTSTRAP_OS=none
|
||||
matrix:
|
||||
# Debian Jessie
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=asia-east1-a
|
||||
CLUSTER_MODE=ha
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=europe-west1-c
|
||||
CLUSTER_MODE=default
|
||||
|
||||
# Centos 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=centos-7
|
||||
CLOUD_REGION=asia-northeast1-c
|
||||
CLUSTER_MODE=default
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=centos-7
|
||||
CLOUD_REGION=us-central1-b
|
||||
CLUSTER_MODE=ha
|
||||
|
||||
# Redhat 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=rhel-7
|
||||
CLOUD_REGION=us-east1-c
|
||||
CLUSTER_MODE=default
|
||||
|
||||
# CoreOS stable
|
||||
#- >-
|
||||
# KUBE_NETWORK_PLUGIN=weave
|
||||
# CLOUD_IMAGE=coreos-stable
|
||||
# CLOUD_REGION=europe-west1-b
|
||||
# CLUSTER_MODE=ha
|
||||
# BOOTSTRAP_OS=coreos
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=coreos-stable
|
||||
CLOUD_REGION=us-west1-b
|
||||
CLUSTER_MODE=default
|
||||
BOOTSTRAP_OS=coreos
|
||||
|
||||
# Extra cases for separated roles
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=rhel-7
|
||||
CLOUD_REGION=asia-northeast1-b
|
||||
CLUSTER_MODE=separate
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||
CLOUD_REGION=europe-west1-d
|
||||
CLUSTER_MODE=separate
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=coreos-stable
|
||||
CLOUD_REGION=us-central1-f
|
||||
CLUSTER_MODE=separate
|
||||
BOOTSTRAP_OS=coreos
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
|
||||
|
||||
before_install:
|
||||
# Install Ansible.
|
||||
- pip install --user ansible
|
||||
- pip install --user netaddr
|
||||
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
|
||||
- pip install --user apache-libcloud==0.20.1
|
||||
- pip install --user boto==2.9.0 -U
|
||||
# Load cached docker images
|
||||
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
|
||||
|
||||
cache:
|
||||
- directories:
|
||||
- $HOME/.cache/pip
|
||||
- $HOME/.local
|
||||
- /var/tmp/releases
|
||||
|
||||
before_script:
|
||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
||||
- $HOME/.local/bin/ansible-playbook --version
|
||||
- cp tests/ansible.cfg .
|
||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
||||
|
||||
script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Create cluster with netchecker app deployed
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e download_run_once=true
|
||||
-e download_localhost=true
|
||||
-e local_release_dir=/var/tmp/releases
|
||||
-e deploy_netchecker=true
|
||||
cluster.yml
|
||||
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
## Ping the between 2 pod
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
## Advanced DNS checks
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
|
||||
after_script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
17 README.md
@@ -2,9 +2,9 @@

##Deploy a production ready kubernetes cluster

If you have questions, you can [invite yourself](https://slack.kubespray.io/) to **chat** with us on Slack! [](https://kubespray.slack.com)
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.

- Can be deployed on **AWS, GCE, OpenStack or Baremetal**
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
- **High available** cluster
- **Composable** (Choice of the network plugin for instance)
- Support most popular **Linux distributions**
@@ -13,7 +13,7 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to

To deploy the cluster you can use :

[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
**Ansible** usual commands <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>

@@ -26,6 +26,7 @@ To deploy the cluster you can use :
* [Cloud providers](docs/cloud.md)
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
* [Network plugins](#network-plugins)
* [Roadmap](docs/roadmap.md)

@@ -41,10 +42,10 @@ Supported Linux distributions
Versions
--------------

[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.0 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.6 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.10.3 <br>

@@ -74,7 +75,7 @@ The choice is defined with the variable `kube_network_plugin`

## CI Tests

[](https://travis-ci.org/kubespray/kargo) </br>
[](https://travis-ci.org/kubernetes-incubator/kargo) </br>

### Google Compute Engine
17 Vagrantfile vendored
@@ -16,7 +16,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$box = "bento/ubuntu-14.04"
$box = "bento/ubuntu-16.04"

host_vars = {}

@@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
  end
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
end

Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false
@@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
      config.vm.hostname = vm_name

      if Vagrant.has_plugin?("vagrant-proxyconf")
        config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
        config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
        config.proxy.no_proxy = $no_proxy
      end

      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end
@@ -80,7 +93,7 @@ Vagrant.configure("2") do |config|
        "flannel_interface" => ip,
        "flannel_backend_type" => "host-gw",
        "local_release_dir" => "/vagrant/temp",
        "download_run_once" => "True"
        "download_run_once" => "False"
      }
      config.vm.network :private_network, ip: ip
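The proxy settings added above take effect only when the `vagrant-proxyconf` plugin is present; the Vagrantfile reads `HTTP_PROXY`/`HTTPS_PROXY`/`NO_PROXY` from the environment. A rough usage sketch, with the proxy URL as a purely illustrative value:

```shell
# Hypothetical proxy address; install the plugin once, then bring the VMs up behind it.
vagrant plugin install vagrant-proxyconf
HTTP_PROXY=http://proxy.example.com:3128 HTTPS_PROXY=http://proxy.example.com:3128 vagrant up
```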
@@ -1,4 +1,9 @@
[ssh_connection]
pipelining=True
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
42 cluster.yml
@@ -1,36 +1,66 @@
---
- hosts: all
- hosts: localhost
  gather_facts: False
  roles:
    - bastion-ssh-config
  tags: [localhost, bastion]

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: true
  gather_facts: false
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - bootstrap-os
  tags:
    - bootstrap-os

- hosts: all
- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: true
  vars:
    ansible_ssh_pipelining: true
  gather_facts: true

- hosts: etcd:!k8s-cluster
- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: true
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }

- hosts: etcd:!k8s-cluster
  any_errors_fatal: true
  roles:
    - { role: etcd, tags: etcd }

- hosts: k8s-cluster
  any_errors_fatal: true
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: etcd, tags: etcd }
    - { role: kubernetes/node, tags: node }
    - { role: network_plugin, tags: network }

- hosts: kube-master
  any_errors_fatal: true
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes-apps/lib, tags: apps }
    - { role: kubernetes-apps/network_plugin, tags: network }

- hosts: calico-rr
  any_errors_fatal: true
  roles:
    - { role: network_plugin/calico/rr, tags: network }

- hosts: k8s-cluster
  any_errors_fatal: true
  roles:
    - { role: dnsmasq, tags: dnsmasq }
    - { role: kubernetes/preinstall, tags: resolvconf }

- hosts: kube-master[0]
  any_errors_fatal: true
  roles:
    - { role: kubernetes-apps/lib, tags: apps }
    - { role: kubernetes-apps, tags: apps }
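For context, a typical invocation of the playbook above, modeled on the CI commands shown earlier on this page; the inventory path and extra variables are illustrative:

```shell
# Sketch only: run cluster.yml against an existing inventory, choosing a network plugin.
ansible-playbook -i inventory/inventory.ini -b --become-user=root \
  -e kube_network_plugin=calico cluster.yml
```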
2 contrib/azurerm/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
.generated
/inventory
64 contrib/azurerm/README.md Normal file
@@ -0,0 +1,64 @@
# Kubernetes on Azure with Azure Resource Group Templates

Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates)

## Status

This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kargo, of course).

## Requirements

- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
- Dedicated Resource Group created in the Azure Portal or through azure-cli

## Configuration through group_vars/all

You have to modify at least one variable in group_vars/all, which is the **cluster_name** variable. It must be globally
unique due to some restrictions in Azure. Most other variables should be self-explanatory if you have some basic Kubernetes
experience.

## Bastion host

You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.

## Generating and applying

To generate and apply the templates, call:

```shell
$ ./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
take care of creating/modifying whatever is needed.

## Clearing a resource group

If you need to delete all resources from a resource group, simply call:

```shell
$ ./clear-rg.sh <resource_group_name>
```

**WARNING**: this really deletes everything from your resource group, including anything you created in it later!


## Generating an inventory for kargo

After you have applied the templates, you can generate an inventory with this call:

```shell
$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory, which can then be used with kargo, e.g.:

```shell
$ cd kargo-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
```
19 contrib/azurerm/apply-rg.sh Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
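The script assumes you are already logged in with azure-cli and that the target resource group exists, as the README above requires. A sketch of those prerequisite steps with the classic xplat azure-cli (the `arm` mode switch and the group name/location are assumptions for illustration):

```shell
# Prerequisites sketch; <resource_group_name> and the region are placeholders.
azure login
azure config mode arm
azure group create <resource_group_name> westeurope
./apply-rg.sh <resource_group_name>
```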
14 contrib/azurerm/clear-rg.sh Executable file
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
12 contrib/azurerm/generate-inventory.sh Executable file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
5 contrib/azurerm/generate-inventory.yml Normal file
@@ -0,0 +1,5 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory
5 contrib/azurerm/generate-templates.yml Normal file
@@ -0,0 +1,5 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - generate-templates
26 contrib/azurerm/group_vars/all Normal file
@@ -0,0 +1,26 @@

# Due to some Azure limitations, this name must be globally unique
cluster_name: example

# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
# node that can be used to access the masters and minions
use_bastion: false

number_of_k8s_masters: 3
number_of_k8s_nodes: 3

masters_vm_size: Standard_A2
masters_os_disk_size: 1000

minions_vm_size: Standard_A2
minions_os_disk_size: 1000

admin_username: devops
admin_password: changeme
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"

# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
11 contrib/azurerm/roles/generate-inventory/tasks/main.yml Normal file
@@ -0,0 +1,11 @@
---

- name: Query Azure VMs
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

- set_fact:
    vm_list: "{{ vm_list_cmd.stdout }}"

- name: Generate inventory
  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
@@ -0,0 +1,33 @@

{% for vm in vm_list %}
{% if not use_bastion or vm.name == 'bastion' %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% else %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% endif %}
{% endfor %}

[kube-master]
{% for vm in vm_list %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[etcd]
{% for vm in vm_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[kube-node]
{% for vm in vm_list %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s-cluster:children]
kube-node
kube-master
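Rendered against a deployment with the VM names used by the templates later in this diff (master-{{i}}, minion-{{i}}), the template above produces an inventory roughly like the following; host names match the templates, while the addresses are purely illustrative:

```
master-0 ansible_ssh_host=40.68.0.10 ip=10.0.4.4
minion-0 ansible_ssh_host=40.68.0.20 ip=10.240.0.4

[kube-master]
master-0

[etcd]
master-0

[kube-node]
minion-0

[k8s-cluster:children]
kube-node
kube-master
```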
37 contrib/azurerm/roles/generate-templates/defaults/main.yml Normal file
@@ -0,0 +1,37 @@
apiVersion: "2015-06-15"

virtualNetworkName: "KubVNET"

subnetAdminName: "ad-subnet"
subnetMastersName: "master-subnet"
subnetMinionsName: "minion-subnet"

routeTableName: "routetable"
securityGroupName: "secgroup"

nameSuffix: "{{cluster_name}}"

availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"

faultDomainCount: 3
updateDomainCount: 10

bastionVmSize: Standard_A0
bastionVMName: bastion
bastionIPAddressName: bastion-pubip

disablePasswordAuthentication: true

sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"

imageReference:
  publisher: "OpenLogic"
  offer: "CentOS"
  sku: "7.2"
  version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"

storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "Standard_LRS"
14 contrib/azurerm/roles/generate-templates/tasks/main.yml Normal file
@@ -0,0 +1,14 @@
- set_fact:
    base_dir: "{{playbook_dir}}/.generated/"

- file: path={{base_dir}} state=directory recurse=true

- template: src={{item}} dest="{{base_dir}}/{{item}}"
  with_items:
    - network.json
    - storage.json
    - availability-sets.json
    - bastion.json
    - masters.json
    - minions.json
    - clear-rg.json
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"type": "Microsoft.Compute/availabilitySets",
|
||||
"name": "{{availabilitySetMasters}}",
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"PlatformFaultDomainCount": "{{faultDomainCount}}",
|
||||
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Compute/availabilitySets",
|
||||
"name": "{{availabilitySetMinions}}",
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"PlatformFaultDomainCount": "{{faultDomainCount}}",
|
||||
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||
"subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]"
|
||||
},
|
||||
"resources": [
|
||||
{% if use_bastion %}
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"name": "{{bastionIPAddressName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Static"
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/networkInterfaces",
|
||||
"name": "{{bastionVMName}}-nic",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]"
|
||||
],
|
||||
"properties": {
|
||||
"ipConfigurations": [
|
||||
{
|
||||
"name": "BastionIpConfig",
|
||||
"properties": {
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]"
|
||||
},
|
||||
"subnet": {
|
||||
"id": "[variables('subnetAdminRef')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"name": "{{bastionVMName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]"
|
||||
],
|
||||
"tags": {
|
||||
"roles": "bastion"
|
||||
},
|
||||
"properties": {
|
||||
"hardwareProfile": {
|
||||
"vmSize": "{{bastionVmSize}}"
|
||||
},
|
||||
"osProfile": {
|
||||
"computerName": "{{bastionVMName}}",
|
||||
"adminUsername": "{{admin_username}}",
|
||||
"adminPassword": "{{admin_password}}",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": "true",
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "{{sshKeyPath}}",
|
||||
"keyData": "{{ssh_public_key}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageProfile": {
|
||||
"imageReference": {{imageReferenceJson}},
|
||||
"osDisk": {
|
||||
"name": "osdisk",
|
||||
"vhd": {
|
||||
"uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]"
|
||||
},
|
||||
"caching": "ReadWrite",
|
||||
"createOption": "FromImage"
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
"networkInterfaces": [
|
||||
{
|
||||
"id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {},
|
||||
"variables": {},
|
||||
"resources": [],
|
||||
"outputs": {}
|
||||
}
|
||||
196 contrib/azurerm/roles/generate-templates/templates/masters.json Normal file
@@ -0,0 +1,196 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
"lbDomainName": "{{nameSuffix}}-api",
|
||||
"lbPublicIPAddressName": "kubernetes-api-pubip",
|
||||
"lbPublicIPAddressType": "Static",
|
||||
"lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]",
|
||||
"lbName": "kubernetes-api",
|
||||
"lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]",
|
||||
|
||||
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||
"kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]"
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"name": "[variables('lbPublicIPAddressName')]",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]",
|
||||
"dnsSettings": {
|
||||
"domainNameLabel": "[variables('lbDomainName')]"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"name": "[variables('lbName')]",
|
||||
"type": "Microsoft.Network/loadBalancers",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]"
|
||||
],
|
||||
"properties": {
|
||||
"frontendIPConfigurations": [
|
||||
{
|
||||
"name": "kube-api-frontend",
|
||||
"properties": {
|
||||
"publicIPAddress": {
|
||||
"id": "[variables('lbPublicIPAddressID')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"backendAddressPools": [
|
||||
{
|
||||
"name": "kube-api-backend"
|
||||
}
|
||||
],
|
||||
"loadBalancingRules": [
|
||||
{
|
||||
"name": "kube-api",
|
||||
"properties": {
|
||||
"frontendIPConfiguration": {
|
||||
"id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]"
|
||||
},
|
||||
"backendAddressPool": {
|
||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||
},
|
||||
"protocol": "tcp",
|
||||
"frontendPort": 443,
|
||||
"backendPort": 443,
|
||||
"enableFloatingIP": false,
|
||||
"idleTimeoutInMinutes": 5,
|
||||
"probe": {
|
||||
"id": "[concat(variables('lbID'), '/probes/kube-api')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"probes": [
|
||||
{
|
||||
"name": "kube-api",
|
||||
"properties": {
|
||||
"protocol": "tcp",
|
||||
"port": 443,
|
||||
"intervalInSeconds": 5,
|
||||
"numberOfProbes": 2
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{% for i in range(number_of_k8s_masters) %}
|
||||
{% if not use_bastion %}
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"name": "master-{{i}}-pubip",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Static"
|
||||
}
|
||||
},
|
||||
{% endif %}
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/networkInterfaces",
|
||||
"name": "master-{{i}}-nic",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
{% if not use_bastion %}
|
||||
"[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]",
|
||||
{% endif %}
|
||||
"[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
|
||||
],
|
||||
"properties": {
|
||||
"ipConfigurations": [
|
||||
{
|
||||
"name": "MastersIpConfig",
|
||||
"properties": {
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
{% if not use_bastion %}
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]"
|
||||
},
|
||||
{% endif %}
|
||||
"subnet": {
|
||||
"id": "[variables('kubeMastersSubnetRef')]"
|
||||
},
|
||||
"loadBalancerBackendAddressPools": [
|
||||
{
|
||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"networkSecurityGroup": {
|
||||
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
|
||||
},
|
||||
"enableIPForwarding": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"name": "master-{{i}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
|
||||
],
|
||||
"tags": {
|
||||
"roles": "kube-master,etcd"
|
||||
},
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"properties": {
|
||||
"availabilitySet": {
|
||||
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]"
|
||||
},
|
||||
"hardwareProfile": {
|
||||
"vmSize": "{{masters_vm_size}}"
|
||||
},
|
||||
"osProfile": {
|
||||
"computerName": "master-{{i}}",
|
||||
"adminUsername": "{{admin_username}}",
|
||||
"adminPassword": "{{admin_password}}",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "{{sshKeyPath}}",
|
||||
"keyData": "{{ssh_public_key}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageProfile": {
|
||||
"imageReference": {{imageReferenceJson}},
|
||||
"osDisk": {
|
||||
"name": "ma{{nameSuffix}}{{i}}",
|
||||
"vhd": {
|
||||
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]"
|
||||
},
|
||||
"caching": "ReadWrite",
|
||||
"createOption": "FromImage",
|
||||
"diskSizeGB": "{{masters_os_disk_size}}"
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
"networkInterfaces": [
|
||||
{
|
||||
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
} {% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
113
contrib/azurerm/roles/generate-templates/templates/minions.json
Normal file
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||
"kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]"
|
||||
},
|
||||
"resources": [
|
||||
{% for i in range(number_of_k8s_nodes) %}
|
||||
{% if not use_bastion %}
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"name": "minion-{{i}}-pubip",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Static"
|
||||
}
|
||||
},
|
||||
{% endif %}
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/networkInterfaces",
|
||||
"name": "minion-{{i}}-nic",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
{% if not use_bastion %}
|
||||
"[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]"
|
||||
{% endif %}
|
||||
],
|
||||
"properties": {
|
||||
"ipConfigurations": [
|
||||
{
|
||||
"name": "MinionsIpConfig",
|
||||
"properties": {
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
{% if not use_bastion %}
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]"
|
||||
},
|
||||
{% endif %}
|
||||
"subnet": {
|
||||
"id": "[variables('kubeMinionsSubnetRef')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"networkSecurityGroup": {
|
||||
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
|
||||
},
|
||||
"enableIPForwarding": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"name": "minion-{{i}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
|
||||
],
|
||||
"tags": {
|
||||
"roles": "kube-node"
|
||||
},
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"properties": {
|
||||
"availabilitySet": {
|
||||
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]"
|
||||
},
|
||||
"hardwareProfile": {
|
||||
"vmSize": "{{minions_vm_size}}"
|
||||
},
|
||||
"osProfile": {
|
||||
"computerName": "minion-{{i}}",
|
||||
"adminUsername": "{{admin_username}}",
|
||||
"adminPassword": "{{admin_password}}",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "{{sshKeyPath}}",
|
||||
"keyData": "{{ssh_public_key}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageProfile": {
|
||||
"imageReference": {{imageReferenceJson}},
|
||||
"osDisk": {
|
||||
"name": "mi{{nameSuffix}}{{i}}",
|
||||
"vhd": {
|
||||
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]"
|
||||
},
|
||||
"caching": "ReadWrite",
|
||||
"createOption": "FromImage",
|
||||
"diskSizeGB": "{{minions_os_disk_size}}"
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
"networkInterfaces": [
|
||||
{
|
||||
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
} {% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
109
contrib/azurerm/roles/generate-templates/templates/network.json
Normal file
@@ -0,0 +1,109 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/routeTables",
|
||||
"name": "{{routeTableName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"routes": [
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Network/virtualNetworks",
|
||||
"name": "{{virtualNetworkName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]"
|
||||
],
|
||||
"properties": {
|
||||
"addressSpace": {
|
||||
"addressPrefixes": [
|
||||
"{{azure_vnet_cidr}}"
|
||||
]
|
||||
},
|
||||
"subnets": [
|
||||
{
|
||||
"name": "{{subnetMastersName}}",
|
||||
"properties": {
|
||||
"addressPrefix": "{{azure_masters_cidr}}",
|
||||
"routeTable": {
|
||||
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "{{subnetMinionsName}}",
|
||||
"properties": {
|
||||
"addressPrefix": "{{azure_minions_cidr}}",
|
||||
"routeTable": {
|
||||
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
{% if use_bastion %}
|
||||
,{
|
||||
"name": "{{subnetAdminName}}",
|
||||
"properties": {
|
||||
"addressPrefix": "{{azure_admin_cidr}}",
|
||||
"routeTable": {
|
||||
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"type": "Microsoft.Network/networkSecurityGroups",
|
||||
"name": "{{securityGroupName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"securityRules": [
|
||||
{% if not use_bastion %}
|
||||
{
|
||||
"name": "ssh",
|
||||
"properties": {
|
||||
"description": "Allow SSH",
|
||||
"protocol": "Tcp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "22",
|
||||
"sourceAddressPrefix": "Internet",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 100,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
},
|
||||
{% endif %}
|
||||
{
|
||||
"name": "kube-api",
|
||||
"properties": {
|
||||
"description": "Allow secure kube-api",
|
||||
"protocol": "Tcp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "443",
|
||||
"sourceAddressPrefix": "Internet",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 101,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"resources": [],
|
||||
"dependsOn": []
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
},
|
||||
"variables": {
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"type": "Microsoft.Storage/storageAccounts",
|
||||
"name": "{{storageAccountName}}",
|
||||
"location": "[resourceGroup().location]",
|
||||
"apiVersion": "{{apiVersion}}",
|
||||
"properties": {
|
||||
"accountType": "{{storageAccountType}}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
297
contrib/inventory_builder/inventory.py
Normal file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/python3
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Usage: inventory.py ip1 [ip2 ...]
|
||||
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
|
||||
#
|
||||
# Advanced usage:
|
||||
# Add another host after initial creation: inventory.py 10.10.1.5
|
||||
# Delete a host: inventory.py -10.10.1.3
|
||||
# Delete a host by id: inventory.py -node1
|
||||
#
|
||||
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
|
||||
# YAML file should be in the following format:
|
||||
# group1:
|
||||
# host1:
|
||||
# ip: X.X.X.X
|
||||
# var: val
|
||||
# group2:
|
||||
# host2:
|
||||
# ip: X.X.X.X
|
||||
|
||||
from collections import OrderedDict
|
||||
try:
|
||||
import configparser
|
||||
except ImportError:
|
||||
import ConfigParser as configparser
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
ROLES = ['kube-master', 'all', 'k8s-cluster:children', 'kube-node', 'etcd']
|
||||
PROTECTED_NAMES = ROLES
|
||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||
'0': False, 'no': False, 'false': False, 'off': False}
|
||||
|
||||
|
||||
def get_var_as_bool(name, default):
|
||||
value = os.environ.get(name, '')
|
||||
return _boolean_states.get(value.lower(), default)
|
||||
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
||||
DEBUG = get_var_as_bool("DEBUG", True)
|
||||
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
||||
|
||||
|
||||
class KargoInventory(object):
|
||||
|
||||
def __init__(self, changed_hosts=None, config_file=None):
|
||||
self.config = configparser.ConfigParser(allow_no_value=True,
|
||||
delimiters=('\t', ' '))
|
||||
self.config_file = config_file
|
||||
if self.config_file:
|
||||
self.config.read(self.config_file)
|
||||
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
|
||||
self.ensure_required_groups(ROLES)
|
||||
|
||||
if changed_hosts:
|
||||
self.hosts = self.build_hostnames(changed_hosts)
|
||||
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||
self.set_kube_master(list(self.hosts.keys())[:2])
|
||||
self.set_all(self.hosts)
|
||||
self.set_k8s_cluster()
|
||||
self.set_kube_node(self.hosts.keys())
|
||||
self.set_etcd(list(self.hosts.keys())[:3])
|
||||
else: # Show help if no options
|
||||
self.show_help()
|
||||
sys.exit(0)
|
||||
|
||||
self.write_config(self.config_file)
|
||||
|
||||
def write_config(self, config_file):
|
||||
if config_file:
|
||||
with open(config_file, 'w') as f:
|
||||
self.config.write(f)
|
||||
else:
|
||||
print("WARNING: Unable to save config. Make sure you set "
|
||||
"CONFIG_FILE env var.")
|
||||
|
||||
def debug(self, msg):
|
||||
if DEBUG:
|
||||
print("DEBUG: {0}".format(msg))
|
||||
|
||||
def get_ip_from_opts(self, optstring):
|
||||
opts = optstring.split(' ')
|
||||
for opt in opts:
|
||||
if '=' not in opt:
|
||||
continue
|
||||
k, v = opt.split('=')
|
||||
if k == "ip":
|
||||
return v
|
||||
raise ValueError("IP parameter not found in options")
|
||||
|
||||
def ensure_required_groups(self, groups):
|
||||
for group in groups:
|
||||
try:
|
||||
self.debug("Adding group {0}".format(group))
|
||||
self.config.add_section(group)
|
||||
except configparser.DuplicateSectionError:
|
||||
pass
|
||||
|
||||
def get_host_id(self, host):
|
||||
'''Returns integer host ID (without padding) from a given hostname.'''
|
||||
try:
|
||||
short_hostname = host.split('.')[0]
|
||||
return int(re.findall(r"\d+$", short_hostname)[-1])
|
||||
except IndexError:
|
||||
raise ValueError("Host name must end in an integer")
|
||||
|
||||
def build_hostnames(self, changed_hosts):
|
||||
existing_hosts = OrderedDict()
|
||||
highest_host_id = 0
|
||||
try:
|
||||
for host, opts in self.config.items('all'):
|
||||
existing_hosts[host] = opts
|
||||
host_id = self.get_host_id(host)
|
||||
if host_id > highest_host_id:
|
||||
highest_host_id = host_id
|
||||
except configparser.NoSectionError:
|
||||
pass
|
||||
|
||||
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
||||
next_host_id = highest_host_id + 1
|
||||
|
||||
all_hosts = existing_hosts.copy()
|
||||
for host in changed_hosts:
|
||||
if host[0] == "-":
|
||||
realhost = host[1:]
|
||||
if self.exists_hostname(all_hosts, realhost):
|
||||
self.debug("Marked {0} for deletion.".format(realhost))
|
||||
all_hosts.pop(realhost)
|
||||
elif self.exists_ip(all_hosts, realhost):
|
||||
self.debug("Marked {0} for deletion.".format(realhost))
|
||||
self.delete_host_by_ip(all_hosts, realhost)
|
||||
elif host[0].isdigit():
|
||||
if self.exists_hostname(all_hosts, host):
|
||||
self.debug("Skipping existing host {0}.".format(host))
|
||||
continue
|
||||
elif self.exists_ip(all_hosts, host):
|
||||
self.debug("Skipping existing host {0}.".format(host))
|
||||
continue
|
||||
|
||||
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||
next_host_id += 1
|
||||
all_hosts[next_host] = "ansible_host={0} ip={1}".format(
|
||||
host, host)
|
||||
elif host[0].isalpha():
|
||||
raise Exception("Adding hosts by hostname is not supported.")
|
||||
|
||||
return all_hosts
|
||||
|
||||
def exists_hostname(self, existing_hosts, hostname):
|
||||
return hostname in existing_hosts.keys()
|
||||
|
||||
def exists_ip(self, existing_hosts, ip):
|
||||
for host_opts in existing_hosts.values():
|
||||
if ip == self.get_ip_from_opts(host_opts):
|
||||
return True
|
||||
return False
|
||||
|
||||
def delete_host_by_ip(self, existing_hosts, ip):
|
||||
for hostname, host_opts in existing_hosts.items():
|
||||
if ip == self.get_ip_from_opts(host_opts):
|
||||
del existing_hosts[hostname]
|
||||
return
|
||||
raise ValueError("Unable to find host by IP: {0}".format(ip))
|
||||
|
||||
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
||||
for role in self.config.sections():
|
||||
for host, _ in self.config.items(role):
|
||||
if host not in hostnames and host not in protected_names:
|
||||
self.debug("Host {0} removed from role {1}".format(host,
|
||||
role))
|
||||
self.config.remove_option(role, host)
|
||||
|
||||
def add_host_to_group(self, group, host, opts=""):
|
||||
self.debug("adding host {0} to group {1}".format(host, group))
|
||||
self.config.set(group, host, opts)
|
||||
|
||||
def set_kube_master(self, hosts):
|
||||
for host in hosts:
|
||||
self.add_host_to_group('kube-master', host)
|
||||
|
||||
def set_all(self, hosts):
|
||||
for host, opts in hosts.items():
|
||||
self.add_host_to_group('all', host, opts)
|
||||
|
||||
def set_k8s_cluster(self):
|
||||
self.add_host_to_group('k8s-cluster:children', 'kube-node')
|
||||
self.add_host_to_group('k8s-cluster:children', 'kube-master')
|
||||
|
||||
def set_kube_node(self, hosts):
|
||||
for host in hosts:
|
||||
self.add_host_to_group('kube-node', host)
|
||||
|
||||
def set_etcd(self, hosts):
|
||||
for host in hosts:
|
||||
self.add_host_to_group('etcd', host)
|
||||
|
||||
def load_file(self, files=None):
|
||||
'''Directly loads JSON, or YAML file to inventory.'''
|
||||
|
||||
if not files:
|
||||
raise Exception("No input file specified.")
|
||||
|
||||
import json
|
||||
import yaml
|
||||
|
||||
for filename in list(files):
|
||||
# Try JSON, then YAML
|
||||
try:
|
||||
with open(filename, 'r') as f:
|
||||
data = json.load(f)
|
||||
except ValueError:
|
||||
try:
|
||||
with open(filename, 'r') as f:
|
||||
data = yaml.load(f)
|
||||
print("yaml")
|
||||
except yaml.YAMLError:
|
||||
raise Exception("Cannot read %s as JSON, YAML, or CSV",
|
||||
filename)
|
||||
|
||||
self.ensure_required_groups(ROLES)
|
||||
self.set_k8s_cluster()
|
||||
for group, hosts in data.items():
|
||||
self.ensure_required_groups([group])
|
||||
for host, opts in hosts.items():
|
||||
optstring = "ansible_host={0} ip={0}".format(opts['ip'])
|
||||
for key, val in opts.items():
|
||||
if key == "ip":
|
||||
continue
|
||||
optstring += " {0}={1}".format(key, val)
|
||||
|
||||
self.add_host_to_group('all', host, optstring)
|
||||
self.add_host_to_group(group, host)
|
||||
self.write_config(self.config_file)
|
||||
|
||||
def parse_command(self, command, args=None):
|
||||
if command == 'help':
|
||||
self.show_help()
|
||||
elif command == 'print_cfg':
|
||||
self.print_config()
|
||||
elif command == 'print_ips':
|
||||
self.print_ips()
|
||||
elif command == 'load':
|
||||
self.load_file(args)
|
||||
else:
|
||||
raise Exception("Invalid command specified.")
|
||||
|
||||
def show_help(self):
|
||||
help_text = '''Usage: inventory.py ip1 [ip2 ...]
|
||||
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
|
||||
|
||||
Available commands:
|
||||
help - Display this message
|
||||
print_cfg - Write inventory file to stdout
|
||||
print_ips - Write a space-delimited list of IPs from "all" group
|
||||
|
||||
Advanced usage:
|
||||
Add another host after initial creation: inventory.py 10.10.1.5
|
||||
Delete a host: inventory.py -10.10.1.3
|
||||
Delete a host by id: inventory.py -node1'''
|
||||
print(help_text)
|
||||
|
||||
def print_config(self):
|
||||
self.config.write(sys.stdout)
|
||||
|
||||
def print_ips(self):
|
||||
ips = []
|
||||
for host, opts in self.config.items('all'):
|
||||
ips.append(self.get_ip_from_opts(opts))
|
||||
print(' '.join(ips))
|
||||
|
||||
|
||||
def main(argv=None):
|
||||
if not argv:
|
||||
argv = sys.argv[1:]
|
||||
KargoInventory(argv, CONFIG_FILE)
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
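A minimal usage sketch for the inventory builder above, assuming it is run from the repository root; the `./inventory/inventory.cfg` path is illustrative (the script itself defaults `CONFIG_FILE` to `./inventory.cfg`):

```
# Build an initial inventory from three node IPs; CONFIG_FILE, DEBUG and
# HOST_PREFIX are the environment variables read by inventory.py.
CONFIG_FILE=./inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5

# Inspect the generated config and the space-delimited list of IPs.
CONFIG_FILE=./inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py print_cfg
CONFIG_FILE=./inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py print_ips

# Remove a host by IP or by generated name, then load a YAML/JSON description
# in the format shown in the header comment of inventory.py.
CONFIG_FILE=./inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py -10.10.1.3
CONFIG_FILE=./inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py load hosts.yaml
```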
1
contrib/inventory_builder/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
configparser>=3.3.0
|
||||
3
contrib/inventory_builder/setup.cfg
Normal file
@@ -0,0 +1,3 @@
|
||||
[metadata]
|
||||
name = kargo-inventory-builder
|
||||
version = 0.1
|
||||
29
contrib/inventory_builder/setup.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
|
||||
import setuptools
|
||||
|
||||
# In python < 2.7.4, a lazy loading of package `pbr` will break
|
||||
# setuptools if some other modules registered functions in `atexit`.
|
||||
# solution from: http://bugs.python.org/issue15881#msg170215
|
||||
try:
|
||||
import multiprocessing # noqa
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
setuptools.setup(
|
||||
setup_requires=[],
|
||||
pbr=False)
|
||||
3
contrib/inventory_builder/test-requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
hacking>=0.10.2
|
||||
pytest>=2.8.0
|
||||
mock>=1.3.0
|
||||
212
contrib/inventory_builder/tests/test_inventory.py
Normal file
@@ -0,0 +1,212 @@
|
||||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
import unittest
|
||||
|
||||
from collections import OrderedDict
|
||||
import sys
|
||||
|
||||
path = "./contrib/inventory_builder/"
|
||||
if path not in sys.path:
|
||||
sys.path.append(path)
|
||||
|
||||
import inventory
|
||||
|
||||
|
||||
class TestInventory(unittest.TestCase):
|
||||
@mock.patch('inventory.sys')
|
||||
def setUp(self, sys_mock):
|
||||
sys_mock.exit = mock.Mock()
|
||||
super(TestInventory, self).setUp()
|
||||
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
||||
self.inv = inventory.KargoInventory()
|
||||
|
||||
def test_get_ip_from_opts(self):
|
||||
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||
expected = "10.90.3.2"
|
||||
result = self.inv.get_ip_from_opts(optstring)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_get_ip_from_opts_invalid(self):
|
||||
optstring = "notanaddr=value something random!chars:D"
|
||||
self.assertRaisesRegexp(ValueError, "IP parameter not found",
|
||||
self.inv.get_ip_from_opts, optstring)
|
||||
|
||||
def test_ensure_required_groups(self):
|
||||
groups = ['group1', 'group2']
|
||||
self.inv.ensure_required_groups(groups)
|
||||
for group in groups:
|
||||
self.assertTrue(group in self.inv.config.sections())
|
||||
|
||||
def test_get_host_id(self):
|
||||
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
|
||||
'node3.xyz123.aaa']
|
||||
expected = [99, 1, 1, 1, 3]
|
||||
for hostname, expected in zip(hostnames, expected):
|
||||
result = self.inv.get_host_id(hostname)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_get_host_id_invalid(self):
|
||||
bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
|
||||
for hostname in bad_hostnames:
|
||||
self.assertRaisesRegexp(ValueError, "Host name must end in an",
|
||||
self.inv.get_host_id, hostname)
|
||||
|
||||
def test_build_hostnames_add_one(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||
self.inv.config['all'] = expected
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two(self):
|
||||
changed_hosts = ['10.90.0.2', '10.90.0.3']
|
||||
expected = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.config['all'] = OrderedDict()
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_delete_first(self):
|
||||
changed_hosts = ['-10.90.0.2']
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.config['all'] = existing_hosts
|
||||
expected = OrderedDict([
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_exists_hostname_positive(self):
|
||||
hostname = 'node1'
|
||||
expected = True
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_exists_hostname_negative(self):
|
||||
hostname = 'node99'
|
||||
expected = False
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_exists_ip_positive(self):
|
||||
ip = '10.90.0.2'
|
||||
expected = True
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_exists_ip_negative(self):
|
||||
ip = '10.90.0.200'
|
||||
expected = False
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_delete_host_by_ip_positive(self):
|
||||
ip = '10.90.0.2'
|
||||
expected = OrderedDict([
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.delete_host_by_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, existing_hosts)
|
||||
|
||||
def test_delete_host_by_ip_negative(self):
|
||||
ip = '10.90.0.200'
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.assertRaisesRegexp(ValueError, "Unable to find host",
|
||||
self.inv.delete_host_by_ip, existing_hosts, ip)
|
||||
|
||||
def test_purge_invalid_hosts(self):
|
||||
proper_hostnames = ['node1', 'node2']
|
||||
bad_host = 'doesnotbelong2'
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3'),
|
||||
('doesnotbelong2', 'whateveropts=ilike')])
|
||||
self.inv.config['all'] = existing_hosts
|
||||
self.inv.purge_invalid_hosts(proper_hostnames)
|
||||
self.assertTrue(bad_host not in self.inv.config['all'].keys())
|
||||
|
||||
def test_add_host_to_group(self):
|
||||
group = 'etcd'
|
||||
host = 'node1'
|
||||
opts = 'ip=10.90.0.2'
|
||||
|
||||
self.inv.add_host_to_group(group, host, opts)
|
||||
self.assertEqual(self.inv.config[group].get(host), opts)
|
||||
|
||||
def test_set_kube_master(self):
|
||||
group = 'kube-master'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_master([host])
|
||||
self.assertTrue(host in self.inv.config[group])
|
||||
|
||||
def test_set_all(self):
|
||||
group = 'all'
|
||||
hosts = OrderedDict([
|
||||
('node1', 'opt1'),
|
||||
('node2', 'opt2')])
|
||||
|
||||
self.inv.set_all(hosts)
|
||||
for host, opt in hosts.items():
|
||||
self.assertEqual(self.inv.config[group].get(host), opt)
|
||||
|
||||
def test_set_k8s_cluster(self):
|
||||
group = 'k8s-cluster:children'
|
||||
expected_hosts = ['kube-node', 'kube-master']
|
||||
|
||||
self.inv.set_k8s_cluster()
|
||||
for host in expected_hosts:
|
||||
self.assertTrue(host in self.inv.config[group])
|
||||
|
||||
def test_set_kube_node(self):
|
||||
group = 'kube-node'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_node([host])
|
||||
self.assertTrue(host in self.inv.config[group])
|
||||
|
||||
def test_set_etcd(self):
|
||||
group = 'etcd'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_etcd([host])
|
||||
self.assertTrue(host in self.inv.config[group])
|
||||
28
contrib/inventory_builder/tox.ini
Normal file
@@ -0,0 +1,28 @@
|
||||
[tox]
|
||||
minversion = 1.6
|
||||
skipsdist = True
|
||||
envlist = pep8, py27
|
||||
|
||||
[testenv]
|
||||
whitelist_externals = py.test
|
||||
usedevelop = True
|
||||
deps =
|
||||
-r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||
commands = py.test -vv #{posargs:./tests}
|
||||
|
||||
[testenv:pep8]
|
||||
usedevelop = False
|
||||
whitelist_externals = bash
|
||||
commands =
|
||||
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[flake8]
|
||||
show-source = true
|
||||
builtins = _
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg
|
||||
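The tox configuration above defines `pep8` and `py27` environments; a hedged sketch of running the checks locally (assuming `tox` and the listed requirements install cleanly) is:

```
cd contrib/inventory_builder
pip install tox
tox -e pep8   # flake8 over all Python files, as configured above
tox -e py27   # py.test against the unit tests under tests/
```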
92
contrib/network-storage/glusterfs/README.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Deploying a Kargo Kubernetes Cluster with GlusterFS
|
||||
|
||||
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can skip the **Using Terraform and Ansible** section.
|
||||
|
||||
## Using an Ansible inventory
|
||||
|
||||
In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
||||
|
||||
Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kargo root folder and execute (assuming the machines are all running Ubuntu):
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||
```
|
||||
|
||||
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
If your machines are not running Ubuntu, you need to change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines run one OS and your GlusterFS machines a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
||||
|
||||
```
|
||||
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
|
||||
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
|
||||
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
|
||||
```
|
||||
|
||||
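Before moving on, you may want to confirm that Ansible can reach every machine in the inventory you just wrote; a quick ad-hoc check, reusing the inventory path and user from the examples above, is:

```
ansible -i inventory/k8s_gfs_inventory -b --become-user=root --user=ubuntu -m ping all
```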
## Using Terraform and Ansible
|
||||
|
||||
The first step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables looks like this:
|
||||
|
||||
```
|
||||
cluster_name = "cluster1"
|
||||
number_of_k8s_masters = "1"
|
||||
number_of_k8s_masters_no_floating_ip = "2"
|
||||
number_of_k8s_nodes_no_floating_ip = "0"
|
||||
number_of_k8s_nodes = "0"
|
||||
public_key_path = "~/.ssh/my-desired-key.pub"
|
||||
image = "Ubuntu 16.04"
|
||||
ssh_user = "ubuntu"
|
||||
flavor_k8s_node = "node-flavor-id-in-your-openstack"
|
||||
flavor_k8s_master = "master-flavor-id-in-your-openstack"
|
||||
network_name = "k8s-network"
|
||||
floatingip_pool = "net_external"
|
||||
|
||||
# GlusterFS variables
|
||||
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
|
||||
image_gfs = "Ubuntu 16.04"
|
||||
number_of_gfs_nodes_no_floating_ip = "3"
|
||||
gfs_volume_size_in_gb = "50"
|
||||
ssh_user_gfs = "ubuntu"
|
||||
```
|
||||
|
||||
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent, and set up environment variables for Terraform:
|
||||
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/my-desired-key
|
||||
$ echo Setting up Terraform creds && \
|
||||
export TF_VAR_username=${OS_USERNAME} && \
|
||||
export TF_VAR_password=${OS_PASSWORD} && \
|
||||
export TF_VAR_tenant=${OS_TENANT_NAME} && \
|
||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||
```
|
||||
|
||||
Then, from the kargo directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
||||
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to set up (for instance, the type of machine for bootstrapping).
|
||||
|
||||
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
||||
```
|
||||
|
||||
Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
If you need to destroy the cluster, you can run:
|
||||
|
||||
```
|
||||
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
||||
```
|
||||
17
contrib/network-storage/glusterfs/glusterfs.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: gfs-cluster
|
||||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
roles:
|
||||
- { role: glusterfs/client }
|
||||
|
||||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv/lib }
|
||||
- { role: kubernetes-pv }
|
||||
|
||||
44
contrib/network-storage/glusterfs/inventory.example
Normal file
@@ -0,0 +1,44 @@
|
||||
# ## Configure 'ip' variable to bind kubernetes services on a
|
||||
# ## different ip than the default iface
|
||||
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
|
||||
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
|
||||
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
|
||||
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
|
||||
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
|
||||
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
|
||||
#
|
||||
# ## GlusterFS nodes
|
||||
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
|
||||
# ## As in the previous case, you can set ip to give direct communication on internal IPs
|
||||
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
|
||||
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
|
||||
# gfs_node1 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
|
||||
|
||||
# [kube-master]
|
||||
# node1
|
||||
# node2
|
||||
|
||||
# [etcd]
|
||||
# node1
|
||||
# node2
|
||||
# node3
|
||||
|
||||
# [kube-node]
|
||||
# node2
|
||||
# node3
|
||||
# node4
|
||||
# node5
|
||||
# node6
|
||||
|
||||
# [k8s-cluster:children]
|
||||
# kube-node
|
||||
# kube-master
|
||||
|
||||
# [gfs-cluster]
|
||||
# gfs_node1
|
||||
# gfs_node2
|
||||
# gfs_node3
|
||||
|
||||
# [network-storage:children]
|
||||
# gfs-cluster
|
||||
|
||||
44
contrib/network-storage/glusterfs/roles/glusterfs/README.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Ansible Role: GlusterFS
|
||||
|
||||
[](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)
|
||||
|
||||
Installs and configures GlusterFS on Linux.
|
||||
|
||||
## Requirements
|
||||
|
||||
For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
|
||||
|
||||
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
|
||||
|
||||
## Role Variables
|
||||
|
||||
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
||||
|
||||
glusterfs_default_release: ""
|
||||
|
||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
|
||||
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.
|
||||
|
||||
## Dependencies
|
||||
|
||||
None.
|
||||
|
||||
## Example Playbook
|
||||
|
||||
- hosts: server
|
||||
roles:
|
||||
- geerlingguy.glusterfs
|
||||
|
||||
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||
|
||||
## License
|
||||
|
||||
MIT / BSD
|
||||
|
||||
## Author Information
|
||||
|
||||
This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||
@@ -0,0 +1,11 @@
|
||||
---
|
||||
# For Ubuntu.
|
||||
glusterfs_default_release: ""
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.8"
|
||||
|
||||
# Gluster configuration.
|
||||
gluster_mount_dir: /mnt/gluster
|
||||
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
|
||||
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
|
||||
gluster_brick_name: gluster
|
||||
@@ -0,0 +1,30 @@
|
||||
---
|
||||
dependencies: []
|
||||
|
||||
galaxy_info:
|
||||
author: geerlingguy
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: 2.0
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- 6
|
||||
- 7
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
- trusty
|
||||
- xenial
|
||||
- name: Debian
|
||||
versions:
|
||||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
@@ -0,0 +1,16 @@
|
||||
---
|
||||
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
|
||||
# hyperkube and needs to be installed as part of the system.
|
||||
|
||||
# Setup/install tasks.
|
||||
- include: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||
|
||||
- include: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||
|
||||
- name: Ensure Gluster mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
with_items:
|
||||
- "{{ gluster_mount_dir }}"
|
||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||
@@ -0,0 +1,24 @@
|
||||
---
|
||||
- name: Add PPA for GlusterFS.
|
||||
apt_repository:
|
||||
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
|
||||
state: present
|
||||
update_cache: yes
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
when: glusterfs_ppa_added.changed
|
||||
|
||||
- name: Ensure GlusterFS client is installed.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: installed
|
||||
default_release: "{{ glusterfs_default_release }}"
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
@@ -0,0 +1,13 @@
|
||||
---
|
||||
# For Ubuntu.
|
||||
glusterfs_default_release: ""
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.8"
|
||||
|
||||
# Gluster configuration.
|
||||
gluster_mount_dir: /mnt/gluster
|
||||
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
|
||||
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
|
||||
gluster_brick_name: gluster
|
||||
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
|
||||
disk_volume_device_1: /dev/vdb
|
||||
@@ -0,0 +1,30 @@
|
||||
---
|
||||
dependencies: []
|
||||
|
||||
galaxy_info:
|
||||
author: geerlingguy
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: 2.0
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- 6
|
||||
- 7
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
- trusty
|
||||
- xenial
|
||||
- name: Debian
|
||||
versions:
|
||||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
@@ -0,0 +1,82 @@
|
||||
---
|
||||
# Include variables and define needed variables.
|
||||
- name: Include OS-specific variables.
|
||||
include_vars: "{{ ansible_os_family }}.yml"
|
||||
|
||||
# Install xfs package
|
||||
- name: install xfs Debian
|
||||
apt: name=xfsprogs state=present
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: install xfs RedHat
|
||||
yum: name=xfsprogs state=present
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
# Format external volumes in xfs
|
||||
- name: Format volumes in xfs
|
||||
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
|
||||
|
||||
# Mount external volumes
|
||||
- name: mounting new xfs filesystem
|
||||
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
|
||||
|
||||
# Setup/install tasks.
|
||||
- include: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- include: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Ensure GlusterFS is started and enabled at boot.
|
||||
service: "name={{ glusterfs_daemon }} state=started enabled=yes"
|
||||
|
||||
- name: Ensure Gluster brick and mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
with_items:
|
||||
- "{{ gluster_brick_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
|
||||
- name: Configure Gluster volume.
|
||||
gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: yes
|
||||
run_once: true
|
||||
|
||||
- name: Mount glusterfs to retrieve disk size
|
||||
mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
fstype: glusterfs
|
||||
opts: "defaults,_netdev"
|
||||
state: mounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Get Gluster disk size
|
||||
setup: filter=ansible_mounts
|
||||
register: mounts_data
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Set Gluster disk size to variable
|
||||
set_fact:
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Create file on GlusterFS
|
||||
template:
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
fstype: glusterfs
|
||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
state: unmounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
- name: Add PPA for GlusterFS.
|
||||
apt_repository:
|
||||
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
|
||||
state: present
|
||||
update_cache: yes
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
when: glusterfs_ppa_added.changed
|
||||
|
||||
- name: Ensure GlusterFS is installed.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: installed
|
||||
default_release: "{{ glusterfs_default_release }}"
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
@@ -0,0 +1,11 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
yum: name={{ item }} state=present
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
@@ -0,0 +1 @@
|
||||
test file
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
- hosts: all
|
||||
|
||||
roles:
|
||||
- role_under_test
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
glusterfs_daemon: glusterfs-server
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
glusterfs_daemon: glusterd
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
|
||||
template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
- name: Kubernetes Apps | Set GlusterFS endpoint and PV
|
||||
kube:
|
||||
name: glusterfs
|
||||
namespace: default
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{item.item.type}}"
|
||||
filename: "{{kube_config_dir}}/{{item.item.dest}}"
|
||||
state: "{{item.changed | ternary('latest','present') }}"
|
||||
with_items: "{{ gluster_pv.results }}"
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
|
||||
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"kind": "Endpoints",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs"
|
||||
},
|
||||
"subsets": [
|
||||
{% for host in groups['gfs-cluster'] %}
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"port": 1
|
||||
}
|
||||
]
|
||||
}{%- if not loop.last %}, {% endif -%}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: glusterfs
|
||||
spec:
|
||||
capacity:
|
||||
storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
glusterfs:
|
||||
endpoints: glusterfs
|
||||
path: gluster
|
||||
readOnly: false
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
1
contrib/network-storage/glusterfs/roles/kubernetes-pv/lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../../../roles/kubernetes-apps/lib
|
||||
@@ -0,0 +1,2 @@
|
||||
dependencies:
|
||||
- {role: kubernetes-pv/ansible, tags: apps}
|
||||
@@ -32,6 +32,6 @@ resource "null_resource" "ansible-provision" {
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\netcd\" >> inventory"
|
||||
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,14 +5,13 @@ Openstack.
|
||||
|
||||
## Status
|
||||
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It is tested on a
|
||||
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and
|
||||
should work on most modern installs of OpenStack that support the basic
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on a
|
||||
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
|
||||
services.
|
||||
|
||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
||||
|
||||
* floating-ips are used for access
|
||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. Currently you need at least one floating ip, which we suggest using on a master.
|
||||
* you already have a suitable OS image in glance
|
||||
* you already have both an internal network and a floating-ip pool created
|
||||
* you have security-groups enabled
|
||||
@@ -24,16 +23,14 @@ There are some assumptions made to try and ensure it will work on your openstack
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the OpenStack resources required to
|
||||
run Docker Swarm. It is also used to deploy and provision the software
|
||||
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
|
||||
requirements.
|
||||
|
||||
### Prep
|
||||
|
||||
#### OpenStack
|
||||
|
||||
Ensure your OpenStack credentials are loaded in environment variables. This is
|
||||
how I do it:
|
||||
Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
|
||||
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
@@ -46,7 +43,7 @@ differences between OpenStack installs the Terraform does not attempt to create
|
||||
these for you.
|
||||
|
||||
By default Terraform will expect that your networks are called `internal` and
|
||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`.
|
||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
|
||||
|
||||
A full list of variables you can change can be found at [variables.tf](variables.tf).
|
||||
|
||||
@@ -76,8 +73,36 @@ $ echo Setting up Terraform creds && \
|
||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||
```
|
||||
|
||||
If you want to provision master or node VMs that don't use floating ips, write a `my-terraform-vars.tfvars` file, for example:
|
||||
|
||||
```
|
||||
number_of_k8s_masters = "1"
|
||||
number_of_k8s_masters_no_floating_ip = "2"
|
||||
number_of_k8s_nodes_no_floating_ip = "1"
|
||||
number_of_k8s_nodes = "0"
|
||||
```
|
||||
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
|
||||
|
||||
Additionally, the terraform based installation now supports provisioning a GlusterFS shared file system on a separate set of VMs, running either a Debian or a RedHat based image. To enable this, add the following variables to your `my-terraform-vars.tfvars`:
|
||||
|
||||
```
|
||||
# The flavor depends on your openstack installation; you can list available flavors with `nova flavor-list`
|
||||
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
||||
# This is the name of an image already available in your openstack installation.
|
||||
image_gfs = "Ubuntu 15.10"
|
||||
number_of_gfs_nodes_no_floating_ip = "3"
|
||||
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
|
||||
gfs_volume_size_in_gb = "50"
|
||||
# The user needed for the image chosen for GlusterFS.
|
||||
ssh_user_gfs = "ubuntu"
|
||||
```
|
||||
|
||||
If these variables are provided, a new ansible group called `gfs-cluster` is created, for which we have added ansible roles that run in the ansible provisioning step. If you are using CoreOS for the cluster, these GlusterFS VMs still need to be Debian or RedHat based: CoreOS cannot serve GlusterFS, but it can connect to it through the binaries available in hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
|
||||
# Provision a Kubernetes Cluster on OpenStack
|
||||
|
||||
If not using a tfvars file for your setup, then execute:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
|
||||
openstack_compute_secgroup_v2.k8s_master: Creating...
|
||||
@@ -96,6 +121,13 @@ use the `terraform show` command.
|
||||
State path: contrib/terraform/openstack/terraform.tfstate
|
||||
```
|
||||
|
||||
Alternatively, if you wrote your terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If you choose to add masters or nodes without floating IPs (only internal IPs on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command that lets ansible reach those machines by tunneling through the first floating IP used. If you want to handle the ssh tunneling to these machines manually, delete or move that file. If you want to use it, just leave it there; ansible will pick it up automatically.
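To see what was generated, or to opt out of the automatic tunneling, something like the following should work (the `group_vars` path matches the `local-exec` provisioners shown further below; adjust it if your file ended up elsewhere):

```
# Inspect the ssh ProxyCommand settings Terraform generated for ansible
cat contrib/terraform/openstack/group_vars/k8s-cluster.yml
# Remove it if you prefer to handle the ssh tunneling yourself
rm contrib/terraform/openstack/group_vars/k8s-cluster.yml
```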
|
||||
|
||||
Make sure you can connect to the hosts:
|
||||
|
||||
```
|
||||
@@ -114,6 +146,8 @@ example-k8s-master-1 | SUCCESS => {
|
||||
}
|
||||
```
|
||||
|
||||
If you are deploying a system that needs bootstrapping, like CoreOS, these might report a `FAILED` state because CoreOS ships without python. As long as the state is not `UNREACHABLE`, this is fine.
|
||||
|
||||
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
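A quick sketch of that manual check, assuming the default `ubuntu` ssh user and one of the floating IPs reported by Terraform:

```
# Try a plain ssh connection first (user and IP are just examples)
ssh ubuntu@173.247.105.12
# If the floating IP was reused before, drop the stale host key and retry
ssh-keygen -R 173.247.105.12
```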
|
||||
|
||||
Deploy kubernetes:
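The exact invocation depends on your inventory layout; a sketch, assuming the standard `cluster.yml` playbook and the Terraform-generated inventory under `contrib/terraform/openstack` (the inventory path is an assumption):

```
# Run the main playbook against the Terraform-provisioned hosts
ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```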
|
||||
|
||||
1  contrib/terraform/openstack/ansible_bastion_template.txt  Normal file
@@ -0,0 +1 @@
|
||||
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
|
||||
@@ -1,9 +1,14 @@
|
||||
# Valid bootstrap options (required): ubuntu, coreos, none
|
||||
bootstrap_os: none
|
||||
|
||||
# Directory where the binaries will be installed
|
||||
bin_dir: /usr/local/bin
|
||||
|
||||
# Where the binaries will be downloaded.
|
||||
# Note: ensure that you've enough disk space (about 1G)
|
||||
local_release_dir: "/tmp/releases"
|
||||
# Random shifts for retrying failed ops like pushing/downloading
|
||||
retry_stagger: 5
|
||||
|
||||
# Uncomment this line for CoreOS only.
|
||||
# Directory where python binary is installed
|
||||
@@ -28,6 +33,10 @@ kube_users:
|
||||
|
||||
# Kubernetes cluster name, also will be used as DNS domain
|
||||
cluster_name: cluster.local
|
||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
|
||||
ndots: 5
|
||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||
deploy_netchecker: false
|
||||
|
||||
# For some environments, each node has a publicly accessible
|
||||
# address and an address it should bind services to. These are
|
||||
@@ -51,6 +60,16 @@ cluster_name: cluster.local
|
||||
# but don't know about that address themselves.
|
||||
# access_ip: 1.1.1.1
|
||||
|
||||
# Etcd access modes:
|
||||
# Enable multiaccess to configure clients to access all of the etcd members directly
|
||||
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
|
||||
# This may be the case if clients support and loadbalance multiple etcd servers natively.
|
||||
etcd_multiaccess: true
|
||||
|
||||
# Assume there are no internal loadbalancers for apiservers and that they listen on
|
||||
# kube_apiserver_port (default 443)
|
||||
loadbalancer_apiserver_localhost: true
|
||||
|
||||
# Choose network plugin (calico, weave or flannel)
|
||||
kube_network_plugin: flannel
|
||||
|
||||
@@ -89,10 +108,12 @@ kube_apiserver_insecure_port: 8080 # (http)
|
||||
# You still must manually configure all your containers to use this DNS server,
|
||||
# Kubernetes won't do this for you (yet).
|
||||
|
||||
# Do not install additional dnsmasq
|
||||
skip_dnsmasq: false
|
||||
# Upstream dns servers used by dnsmasq
|
||||
upstream_dns_servers:
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4
|
||||
#upstream_dns_servers:
|
||||
# - 8.8.8.8
|
||||
# - 8.8.4.4
|
||||
#
|
||||
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
||||
dns_setup: true
|
||||
@@ -104,33 +125,41 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
|
||||
|
||||
# There are some changes specific to the cloud providers
|
||||
# for instance we need to encapsulate packets with some network plugins
|
||||
# If set the possible values are either 'gce', 'aws' or 'openstack'
|
||||
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
|
||||
# When openstack is used make sure to source in the openstack credentials
|
||||
# like you would do when using nova-client before starting the playbook.
|
||||
# When azure is used, you need to also set the following variables.
|
||||
# cloud_provider:
|
||||
|
||||
# For multi masters architecture:
|
||||
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
|
||||
# This domain name will be inserted into the /etc/hosts file of all servers
|
||||
# configuration example with haproxy :
|
||||
# listen kubernetes-apiserver-https
|
||||
# bind 10.99.0.21:8383
|
||||
# option ssl-hello-chk
|
||||
# mode tcp
|
||||
# timeout client 3h
|
||||
# timeout server 3h
|
||||
# server master1 10.99.0.26:443
|
||||
# server master2 10.99.0.27:443
|
||||
# balance roundrobin
|
||||
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
||||
# see docs/azure.md for details on how to get these values
|
||||
#azure_tenant_id:
|
||||
#azure_subscription_id:
|
||||
#azure_aad_client_id:
|
||||
#azure_aad_client_secret:
|
||||
#azure_resource_group:
|
||||
#azure_location:
|
||||
#azure_subnet_name:
|
||||
#azure_security_group_name:
|
||||
#azure_vnet_name:
|
||||
|
||||
|
||||
## Set these proxy values in order to update docker daemon to use proxies
|
||||
# http_proxy: ""
|
||||
# https_proxy: ""
|
||||
# no_proxy: ""
|
||||
|
||||
# Path used to store Docker data
|
||||
docker_daemon_graph: "/var/lib/docker"
|
||||
|
||||
## A string of extra options to pass to the docker daemon.
|
||||
## This string should be exactly as you wish it to appear.
|
||||
## An obvious use case is allowing insecure-registry access
|
||||
## to self hosted registries like so:
|
||||
docker_options: "--insecure-registry={{ kube_service_addresses }}"
|
||||
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
|
||||
|
||||
# K8s image pull policy (imagePullPolicy)
|
||||
k8s_image_pull_policy: IfNotPresent
|
||||
|
||||
# default packages to install within the cluster
|
||||
kpm_packages: []
|
||||
# - name: kube-system/grafana
|
||||
|
||||
@@ -70,6 +70,28 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
@@ -89,6 +111,57 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage"
|
||||
}
|
||||
volume {
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#output "msg" {
|
||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||
#}
|
||||
|
||||
@@ -1,238 +0,0 @@
|
||||
{
|
||||
"version": 1,
|
||||
"serial": 17,
|
||||
"modules": [
|
||||
{
|
||||
"path": [
|
||||
"root"
|
||||
],
|
||||
"outputs": {},
|
||||
"resources": {
|
||||
"openstack_compute_instance_v2.k8s_master.0": {
|
||||
"type": "openstack_compute_instance_v2",
|
||||
"depends_on": [
|
||||
"openstack_compute_keypair_v2.k8s",
|
||||
"openstack_compute_secgroup_v2.k8s",
|
||||
"openstack_compute_secgroup_v2.k8s_master",
|
||||
"openstack_networking_floatingip_v2.k8s_master"
|
||||
],
|
||||
"primary": {
|
||||
"id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
|
||||
"attributes": {
|
||||
"access_ip_v4": "173.247.105.12",
|
||||
"access_ip_v6": "",
|
||||
"flavor_id": "3",
|
||||
"flavor_name": "m1.medium",
|
||||
"floating_ip": "173.247.105.12",
|
||||
"id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
|
||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
||||
"image_name": "ubuntu-14.04",
|
||||
"key_pair": "kubernetes-example",
|
||||
"metadata.#": "2",
|
||||
"metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
|
||||
"metadata.ssh_user": "ubuntu",
|
||||
"name": "example-k8s-master-1",
|
||||
"network.#": "1",
|
||||
"network.0.access_network": "false",
|
||||
"network.0.fixed_ip_v4": "10.230.7.86",
|
||||
"network.0.fixed_ip_v6": "",
|
||||
"network.0.floating_ip": "173.247.105.12",
|
||||
"network.0.mac": "fa:16:3e:fb:82:1d",
|
||||
"network.0.name": "internal",
|
||||
"network.0.port": "",
|
||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
||||
"security_groups.#": "2",
|
||||
"security_groups.2779334175": "example-k8s",
|
||||
"security_groups.3772290257": "example-k8s-master",
|
||||
"volume.#": "0"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_compute_instance_v2.k8s_master.1": {
|
||||
"type": "openstack_compute_instance_v2",
|
||||
"depends_on": [
|
||||
"openstack_compute_keypair_v2.k8s",
|
||||
"openstack_compute_secgroup_v2.k8s",
|
||||
"openstack_compute_secgroup_v2.k8s_master",
|
||||
"openstack_networking_floatingip_v2.k8s_master"
|
||||
],
|
||||
"primary": {
|
||||
"id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
|
||||
"attributes": {
|
||||
"access_ip_v4": "173.247.105.70",
|
||||
"access_ip_v6": "",
|
||||
"flavor_id": "3",
|
||||
"flavor_name": "m1.medium",
|
||||
"floating_ip": "173.247.105.70",
|
||||
"id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
|
||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
||||
"image_name": "ubuntu-14.04",
|
||||
"key_pair": "kubernetes-example",
|
||||
"metadata.#": "2",
|
||||
"metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
|
||||
"metadata.ssh_user": "ubuntu",
|
||||
"name": "example-k8s-master-2",
|
||||
"network.#": "1",
|
||||
"network.0.access_network": "false",
|
||||
"network.0.fixed_ip_v4": "10.230.7.85",
|
||||
"network.0.fixed_ip_v6": "",
|
||||
"network.0.floating_ip": "173.247.105.70",
|
||||
"network.0.mac": "fa:16:3e:33:98:e6",
|
||||
"network.0.name": "internal",
|
||||
"network.0.port": "",
|
||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
||||
"security_groups.#": "2",
|
||||
"security_groups.2779334175": "example-k8s",
|
||||
"security_groups.3772290257": "example-k8s-master",
|
||||
"volume.#": "0"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_compute_instance_v2.k8s_node": {
|
||||
"type": "openstack_compute_instance_v2",
|
||||
"depends_on": [
|
||||
"openstack_compute_keypair_v2.k8s",
|
||||
"openstack_compute_secgroup_v2.k8s",
|
||||
"openstack_networking_floatingip_v2.k8s_node"
|
||||
],
|
||||
"primary": {
|
||||
"id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
|
||||
"attributes": {
|
||||
"access_ip_v4": "173.247.105.76",
|
||||
"access_ip_v6": "",
|
||||
"flavor_id": "3",
|
||||
"flavor_name": "m1.medium",
|
||||
"floating_ip": "173.247.105.76",
|
||||
"id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
|
||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
||||
"image_name": "ubuntu-14.04",
|
||||
"key_pair": "kubernetes-example",
|
||||
"metadata.#": "2",
|
||||
"metadata.kubespray_groups": "kube-node,k8s-cluster",
|
||||
"metadata.ssh_user": "ubuntu",
|
||||
"name": "example-k8s-node-1",
|
||||
"network.#": "1",
|
||||
"network.0.access_network": "false",
|
||||
"network.0.fixed_ip_v4": "10.230.7.84",
|
||||
"network.0.fixed_ip_v6": "",
|
||||
"network.0.floating_ip": "173.247.105.76",
|
||||
"network.0.mac": "fa:16:3e:53:57:bc",
|
||||
"network.0.name": "internal",
|
||||
"network.0.port": "",
|
||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
||||
"security_groups.#": "1",
|
||||
"security_groups.2779334175": "example-k8s",
|
||||
"volume.#": "0"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_compute_keypair_v2.k8s": {
|
||||
"type": "openstack_compute_keypair_v2",
|
||||
"primary": {
|
||||
"id": "kubernetes-example",
|
||||
"attributes": {
|
||||
"id": "kubernetes-example",
|
||||
"name": "kubernetes-example",
|
||||
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9nU6RPYCabjLH1LvJfpp9L8r8q5RZ6niS92zD95xpm2b2obVydWe0tCSFdmULBuvT8Q8YQ4qOG2g/oJlsGOsia+4CQjYEUV9CgTH9H5HK3vUOwtO5g2eFnYKSmI/4znHa0WYpQFnQK2kSSeCs2beTlJhc8vjfN/2HHmuny6SxNSbnCk/nZdwamxEONIVdjlm3CSBlq4PChT/D/uUqm/nOm0Zqdk9ZlTBkucsjiOCJeEzg4HioKmIH8ewqsKuS7kMADHPH98JMdBhTKbYbLrxTC/RfiaON58WJpmdOA935TT5Td5aVQZoqe/i/5yFRp5fMG239jtfbM0Igu44TEIib pczarkowski@Pauls-MacBook-Pro.local\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_compute_secgroup_v2.k8s": {
|
||||
"type": "openstack_compute_secgroup_v2",
|
||||
"primary": {
|
||||
"id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
|
||||
"attributes": {
|
||||
"description": "example - Kubernetes",
|
||||
"id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
|
||||
"name": "example-k8s",
|
||||
"rule.#": "5",
|
||||
"rule.112275015.cidr": "",
|
||||
"rule.112275015.from_group_id": "",
|
||||
"rule.112275015.from_port": "1",
|
||||
"rule.112275015.id": "597170c9-b35a-45c0-8717-652a342f3fd6",
|
||||
"rule.112275015.ip_protocol": "tcp",
|
||||
"rule.112275015.self": "true",
|
||||
"rule.112275015.to_port": "65535",
|
||||
"rule.2180185248.cidr": "0.0.0.0/0",
|
||||
"rule.2180185248.from_group_id": "",
|
||||
"rule.2180185248.from_port": "-1",
|
||||
"rule.2180185248.id": "ffdcdd5e-f18b-4537-b502-8849affdfed9",
|
||||
"rule.2180185248.ip_protocol": "icmp",
|
||||
"rule.2180185248.self": "false",
|
||||
"rule.2180185248.to_port": "-1",
|
||||
"rule.3267409695.cidr": "",
|
||||
"rule.3267409695.from_group_id": "",
|
||||
"rule.3267409695.from_port": "-1",
|
||||
"rule.3267409695.id": "4f91d9ca-940c-4f4d-9ce1-024cbd7d9c54",
|
||||
"rule.3267409695.ip_protocol": "icmp",
|
||||
"rule.3267409695.self": "true",
|
||||
"rule.3267409695.to_port": "-1",
|
||||
"rule.635693822.cidr": "",
|
||||
"rule.635693822.from_group_id": "",
|
||||
"rule.635693822.from_port": "1",
|
||||
"rule.635693822.id": "c6816e5b-a1a4-4071-acce-d09b92d14d49",
|
||||
"rule.635693822.ip_protocol": "udp",
|
||||
"rule.635693822.self": "true",
|
||||
"rule.635693822.to_port": "65535",
|
||||
"rule.836640770.cidr": "0.0.0.0/0",
|
||||
"rule.836640770.from_group_id": "",
|
||||
"rule.836640770.from_port": "22",
|
||||
"rule.836640770.id": "8845acba-636b-4c23-b9e2-5bff76d9008d",
|
||||
"rule.836640770.ip_protocol": "tcp",
|
||||
"rule.836640770.self": "false",
|
||||
"rule.836640770.to_port": "22"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_compute_secgroup_v2.k8s_master": {
|
||||
"type": "openstack_compute_secgroup_v2",
|
||||
"primary": {
|
||||
"id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
|
||||
"attributes": {
|
||||
"description": "example - Kubernetes Master",
|
||||
"id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
|
||||
"name": "example-k8s-master",
|
||||
"rule.#": "0"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_networking_floatingip_v2.k8s_master.0": {
|
||||
"type": "openstack_networking_floatingip_v2",
|
||||
"primary": {
|
||||
"id": "2a320c67-214d-4631-a840-2de82505ed3f",
|
||||
"attributes": {
|
||||
"address": "173.247.105.12",
|
||||
"id": "2a320c67-214d-4631-a840-2de82505ed3f",
|
||||
"pool": "external",
|
||||
"port_id": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_networking_floatingip_v2.k8s_master.1": {
|
||||
"type": "openstack_networking_floatingip_v2",
|
||||
"primary": {
|
||||
"id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
|
||||
"attributes": {
|
||||
"address": "173.247.105.70",
|
||||
"id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
|
||||
"pool": "external",
|
||||
"port_id": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"openstack_networking_floatingip_v2.k8s_node": {
|
||||
"type": "openstack_networking_floatingip_v2",
|
||||
"primary": {
|
||||
"id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
|
||||
"attributes": {
|
||||
"address": "173.247.105.76",
|
||||
"id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
|
||||
"pool": "external",
|
||||
"port_id": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"version": 1,
|
||||
"serial": 16,
|
||||
"modules": [
|
||||
{
|
||||
"path": [
|
||||
"root"
|
||||
],
|
||||
"outputs": {},
|
||||
"resources": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -6,10 +6,26 @@ variable "number_of_k8s_masters" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes_no_floating_ip" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_gfs_nodes_no_floating_ip" {
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "gfs_volume_size_in_gb" {
|
||||
default = 75
|
||||
}
|
||||
|
||||
variable "public_key_path" {
|
||||
description = "The path of the ssh pub key"
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
@@ -20,11 +36,21 @@ variable "image" {
|
||||
default = "ubuntu-14.04"
|
||||
}
|
||||
|
||||
variable "image_gfs" {
|
||||
description = "Glance image to use for GlusterFS"
|
||||
default = "ubuntu-16.04"
|
||||
}
|
||||
|
||||
variable "ssh_user" {
|
||||
description = "used to fill out tags for ansible inventory"
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "ssh_user_gfs" {
|
||||
description = "used to fill out tags for ansible inventory"
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
default = 3
|
||||
}
|
||||
@@ -33,6 +59,9 @@ variable "flavor_k8s_node" {
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
description = "name of the internal network to use"
|
||||
|
||||
@@ -309,6 +309,7 @@ def openstack_host(resource, module_name):
|
||||
attrs = {
|
||||
'access_ip_v4': raw_attrs['access_ip_v4'],
|
||||
'access_ip_v6': raw_attrs['access_ip_v6'],
|
||||
'ip': raw_attrs['network.0.fixed_ip_v4'],
|
||||
'flavor': parse_dict(raw_attrs, 'flavor',
|
||||
sep='_'),
|
||||
'id': raw_attrs['id'],
|
||||
@@ -346,6 +347,15 @@ def openstack_host(resource, module_name):
|
||||
if 'metadata.ssh_user' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||
|
||||
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
|
||||
device_index = 1
|
||||
for key, value in raw_attrs.items():
|
||||
match = re.search("^volume.*.device$", key)
|
||||
if match:
|
||||
attrs['disk_volume_device_'+str(device_index)] = value
|
||||
device_index += 1
|
||||
|
||||
|
||||
# attrs specific to Mantl
|
||||
attrs.update({
|
||||
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
||||
|
||||
@@ -48,3 +48,89 @@ etcd
|
||||
Group vars
|
||||
--------------
|
||||
The main variables to change are located in the file ```inventory/group_vars/all.yml```.
|
||||
|
||||
Ansible tags
|
||||
------------
|
||||
The following tags are defined in playbooks:
|
||||
|
||||
| Tag name | Used for
|
||||
|--------------------------|---------
|
||||
| apps | K8s apps definitions
|
||||
| azure | Cloud-provider Azure
|
||||
| bastion | Setup ssh config for bastion
|
||||
| bootstrap-os | Anything related to host OS configuration
|
||||
| calico | Network plugin Calico
|
||||
| canal | Network plugin Canal
|
||||
| cloud-provider | Cloud-provider related tasks
|
||||
| dnsmasq | Configuring DNS stack for hosts and K8s apps
|
||||
| docker | Configuring docker for hosts
|
||||
| download | Fetching container images to a delegate host
|
||||
| etcd | Configuring etcd cluster
|
||||
| etcd-pre-upgrade | Upgrading etcd cluster
|
||||
| etcd-secrets | Configuring etcd certs/keys
|
||||
| etchosts | Configuring /etc/hosts entries for hosts
|
||||
| facts | Gathering facts and misc check results
|
||||
| flannel | Network plugin flannel
|
||||
| gce | Cloud-provider GCP
|
||||
| hyperkube | Manipulations with K8s hyperkube image
|
||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||
| k8s-secrets | Configuring K8s certs/keys
|
||||
| kpm | Installing K8s apps definitions with KPM
|
||||
| kube-apiserver | Configuring self-hosted kube-apiserver
|
||||
| kube-controller-manager | Configuring self-hosted kube-controller-manager
|
||||
| kubectl | Installing kubectl and bash completion
|
||||
| kubelet | Configuring kubelet service
|
||||
| kube-proxy | Configuring self-hosted kube-proxy
|
||||
| kube-scheduler | Configuring self-hosted kube-scheduler
|
||||
| localhost | Special steps for the localhost (ansible runner)
|
||||
| master | Configuring K8s master node role
|
||||
| netchecker | Installing netchecker K8s app
|
||||
| network | Configuring networking plugins for K8s
|
||||
| nginx | Configuring LB for kube-apiserver instances
|
||||
| node | Configuring K8s minion (compute) node role
|
||||
| openstack | Cloud-provider OpenStack
|
||||
| preinstall | Preliminary configuration steps
|
||||
| resolvconf | Configuring /etc/resolv.conf for hosts/apps
|
||||
| upgrade | Upgrading, f.e. container images/binaries
|
||||
| upload | Distributing images/binaries across hosts
|
||||
| weave | Network plugin Weave
|
||||
|
||||
Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
|
||||
tags found in the codebase. New tags will be listed with the empty "Used for"
|
||||
field.
|
||||
|
||||
Example commands
|
||||
----------------
|
||||
Example command to filter and apply only DNS configuration tasks and skip
|
||||
everything else related to host OS configuration and downloading images of containers:
|
||||
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
|
||||
```
|
||||
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
|
||||
```
|
||||
And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related components or trying to upload containers to the K8s cluster nodes:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yml \
|
||||
-e download_run_once=true -e download_localhost=true \
|
||||
--tags download --skip-tags upload,upgrade
|
||||
```
|
||||
|
||||
Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.
|
||||
|
||||
Bastion host
|
||||
--------------
|
||||
If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
|
||||
you can use a so-called *bastion* host to connect to your nodes. To specify and use a bastion,
|
||||
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
|
||||
bastion host.
|
||||
|
||||
```
|
||||
bastion ansible_ssh_host=x.x.x.x
|
||||
```
|
||||
|
||||
For more information about Ansible and bastion hosts, read
|
||||
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
|
||||
56  docs/azure.md  Normal file
@@ -0,0 +1,56 @@
|
||||
Azure
|
||||
===============
|
||||
|
||||
To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
|
||||
|
||||
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
|
||||
|
||||
Not all features are supported yet though; for the current status have a look [here](https://github.com/colemickens/azure-kubernetes-status)
|
||||
|
||||
### Parameters
|
||||
|
||||
Before creating the instances you must first set the `azure_` variables in the `group_vars/all.yml` file.
|
||||
|
||||
All of the values can be retrieved using the azure cli tool which can be downloaded here: https://docs.microsoft.com/en-gb/azure/xplat-cli-install
|
||||
After installation you have to run `azure login` to get access to your account.
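As a sketch, the collected values can then be dropped into `inventory/group_vars/all.yml` next to the commented `azure_*` block shown earlier; every value below is a placeholder to be replaced with the output of the azure cli commands described next:

```
# Append placeholder Azure settings to the inventory group vars; replace each
# value with the real IDs/names retrieved via the azure cli.
cat >> inventory/group_vars/all.yml <<'EOF'
cloud_provider: azure
azure_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_secret: "CLIENT_SECRET"
azure_resource_group: "my-resource-group"
azure_location: "westeurope"
azure_subnet_name: "my-subnet"
azure_security_group_name: "my-nsg"
azure_vnet_name: "my-vnet"
EOF
```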
|
||||
|
||||
|
||||
#### azure\_tenant\_id + azure\_subscription\_id
|
||||
run `azure account show` to retrieve your subscription id and tenant id:
|
||||
`azure_tenant_id` -> Tenant ID field
|
||||
`azure_subscription_id` -> ID field
|
||||
|
||||
|
||||
#### azure\_location
|
||||
The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `azure location list`
|
||||
|
||||
|
||||
#### azure\_resource\_group
|
||||
The name of the resource group your instances are in, can be retrieved via `azure group list`
|
||||
|
||||
#### azure\_vnet\_name
|
||||
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`
|
||||
|
||||
#### azure\_subnet\_name
|
||||
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`
|
||||
|
||||
#### azure\_security\_group\_name
|
||||
The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
|
||||
|
||||
#### azure\_aad\_client\_id + azure\_aad\_client\_secret
|
||||
These will have to be generated first:
|
||||
- Create an Azure AD Application with:
|
||||
`azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
|
||||
The name, identifier-uri, home-page and the password can be chosen
|
||||
Note the AppId in the output.
|
||||
- Create Service principal for the application with:
|
||||
`azure ad sp create --applicationId AppId`
|
||||
This is the AppId from the last command
|
||||
- Create the role assignment with:
|
||||
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
|
||||
|
||||
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||
|
||||
## Provisioning Azure with Resource Group Templates
|
||||
|
||||
You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
|
||||
116  docs/calico.md
@@ -10,23 +10,57 @@ docker ps | grep calico
|
||||
The **calicoctl** command allows checking the status of the network workloads.
|
||||
* Check the status of Calico nodes
|
||||
|
||||
```
|
||||
calicoctl node status
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```
|
||||
calicoctl status
|
||||
```
|
||||
|
||||
* Show the configured network subnet for containers
|
||||
|
||||
```
|
||||
calicoctl get ippool -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```
|
||||
calicoctl pool show
|
||||
```
|
||||
|
||||
* Show the workloads (IP addresses of containers and where they are located)
|
||||
|
||||
```
|
||||
calicoctl get workloadEndpoint -o wide
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```
|
||||
calicoctl get hostEndpoint -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```
|
||||
calicoctl endpoint show --detail
|
||||
```
|
||||
|
||||
##### Optionnal : BGP Peering with border routers
|
||||
##### Optional : Define network backend
|
||||
|
||||
In some cases you may want to define the Calico network backend. Allowed values are 'bird', 'gobgp' or 'none'. Bird is the default value.
|
||||
|
||||
To redefine it, edit the inventory and add a group variable `calico_network_backend`
|
||||
|
||||
```
|
||||
calico_network_backend: none
|
||||
```
|
||||
|
||||
##### Optional : BGP Peering with border routers
|
||||
|
||||
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
|
||||
For instance, if you have a cluster spread across different locations and you want your pods to talk to each other no matter where they are located.
|
||||
@@ -37,3 +71,83 @@ you'll need to edit the inventory and add a hostvar `local_as` per node.
|
||||
```
|
||||
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
|
||||
```
|
||||
|
||||
##### Optional : Define global AS number
|
||||
|
||||
Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).
|
||||
It defaults to "64512".
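For instance, it can be overridden as an extra var when running the playbook (the value matches the AS number used in the route reflector example below):

```
# Override Calico's global AS number for the whole cluster
ansible-playbook -i inventory/inventory.ini cluster.yml -e global_as_num=65400
```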
|
||||
|
||||
##### Optional : BGP Peering with route reflectors
|
||||
|
||||
At large scale you may want to disable full node-to-node mesh in order to
|
||||
optimize your BGP topology and improve `calico-node` containers' start times.
|
||||
|
||||
To do so you can deploy BGP route reflectors and peer `calico-node` with them as
|
||||
recommended here:
|
||||
|
||||
* https://hub.docker.com/r/calico/routereflector/
|
||||
* http://docs.projectcalico.org/v2.0/reference/private-cloud/l3-interconnect-fabric
|
||||
|
||||
You need to edit your inventory and add:
|
||||
|
||||
* `calico-rr` group with nodes in it. At the moment it's incompatible with
|
||||
`kube-node` due to BGP port conflict with `calico-node` container. So you
|
||||
should not have nodes in both `calico-rr` and `kube-node` groups.
|
||||
* `cluster_id` by route reflector node/group (see details
|
||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
||||
|
||||
Here's an example of Kargo inventory with route reflectors:
|
||||
|
||||
```
|
||||
[all]
|
||||
rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10
|
||||
rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11
|
||||
node2 ansible_ssh_host=10.210.1.12 ip=10.210.1.12
|
||||
node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13
|
||||
node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14
|
||||
node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15
|
||||
|
||||
[kube-master]
|
||||
node2
|
||||
node3
|
||||
|
||||
[etcd]
|
||||
node2
|
||||
node3
|
||||
node4
|
||||
|
||||
[kube-node]
|
||||
node2
|
||||
node3
|
||||
node4
|
||||
node5
|
||||
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
||||
[calico-rr]
|
||||
rr0
|
||||
rr1
|
||||
|
||||
[rack0]
|
||||
rr0
|
||||
rr1
|
||||
node2
|
||||
node3
|
||||
node4
|
||||
node5
|
||||
|
||||
[rack0:vars]
|
||||
cluster_id="1.0.0.1"
|
||||
```
|
||||
|
||||
The inventory above will deploy the following topology assuming that calico's
|
||||
`global_as_num` is set to `65400`:
|
||||
|
||||

|
||||
|
||||
Cloud providers configuration
|
||||
=============================
|
||||
|
||||
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
||||
|
||||
@@ -1,15 +1,24 @@
|
||||
K8s DNS stack by Kargo
|
||||
======================
|
||||
|
||||
Kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||
|
||||
Note, additional search (sub)domains may be defined in the ``searchdomains``
|
||||
Other nodes in the inventory, like external storage nodes or a separate etcd cluster
node group, are considered non-cluster and DNS resolution for them is left up to the user to configure.
|
||||
|
||||
Note, custom ``ndots`` values affect only the dnsmasq daemon set (explained below),
while kubedns has ``ndots=5`` hardcoded, which is not recommended for
[DNS performance reasons](https://github.com/kubernetes/kubernetes/issues/14051).
You can use config maps for the kubedns app to work around the issue, which is
not yet in the Kargo scope.
|
||||
|
||||
Additional search (sub)domains may be defined in the ``searchdomains``
|
||||
and ``ndots`` vars. And additional recursive DNS resolvers in the ``upstream_dns_servers``,
|
||||
``nameservers`` vars. Intranet DNS resolvers should be specified in the first
|
||||
place, followed by external resolvers, for example:
|
||||
``nameservers`` vars. Intranet/cloud provider DNS resolvers should be specified
|
||||
in the first place, followed by external resolvers, for example:
|
||||
|
||||
```
|
||||
skip_dnsmasq: true
|
||||
@@ -21,7 +30,33 @@ or
|
||||
skip_dnsmasq: false
|
||||
upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
|
||||
```
|
||||
The vars are explained below as well.
|
||||
The vars are explained below. During the early cluster deployment stage, when no
K8s cluster and apps exist yet, a user may still expect local repos to be
accessible via authoritative intranet resolvers. For that case, if no custom vars
were specified, the default resolver is set to either the cloud provider default
or `8.8.8.8`, and the search domain is set to the default ``dns_domain`` value as well.
Later, the nameservers are reconfigured to the DNS service IP that Kargo
configures for the K8s cluster.
|
||||
|
||||
Also note, existing records will be purged from the `/etc/resolv.conf`,
|
||||
including resolvconf's base/head/cloud-init config files and those that come from dhclient.
|
||||
This is required for hostnet pods networking and for [kubelet to not exceed search domains
|
||||
limits](https://github.com/kubernetes/kubernetes/issues/9229).
|
||||
|
||||
Instead, new domain, search, nameserver records and options will be defined from the
|
||||
aforementioned vars:
|
||||
* Superseded via dhclient's DNS update hook.
|
||||
* Generated via cloud-init (CoreOS only).
|
||||
* Statically defined in the `/etc/resolv.conf`, if none of above is applicable.
|
||||
* Resolvconf's head/base files are disabled from populating anything into the
|
||||
`/etc/resolv.conf`.
|
||||
|
||||
It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of the DNS stack, so please choose them wisely.
|
||||
The dnsmasq DaemonSet can accept lower ``ndots`` values and return NXDOMAIN
|
||||
replies for [bogus internal FQDNS](https://github.com/kubernetes/kubernetes/issues/19634#issuecomment-253948954)
|
||||
before it even hits the kubedns app. This enables dnsmasq to serve as a
|
||||
protective, but still recursive resolver in front of kubedns.
|
||||
|
||||
DNS configuration details
|
||||
-------------------------
|
||||
@@ -78,8 +113,7 @@ Limitations
|
||||
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
|
||||
for the SkyDNS ``ndots`` param via an
|
||||
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
|
||||
add-on, while SkyDNS supports it though. Thus, DNS SRV records may not work
|
||||
as expected as they require the ``ndots:7``.
|
||||
add-on, while SkyDNS supports it though.
|
||||
|
||||
* the ``searchdomains`` have a limitation of 6 names and 256 chars in
length. Due to the default ``svc, default.svc`` subdomains, the actual
|
||||
|
||||
42  docs/downloads.md  Normal file
@@ -0,0 +1,42 @@
|
||||
Downloading binaries and containers
|
||||
===================================
|
||||
|
||||
Kargo supports several download/upload modes. The default is:
|
||||
|
||||
* Each node downloads binaries and container images on its own, which is
|
||||
``download_run_once: False``.
|
||||
* For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
|
||||
* For system managed containers, like kubelet or etcd, pull policy is
|
||||
``download_always_pull: False``, which means pull only if the wanted repo and
tag/sha256 digest differ from what the host already has.
|
||||
|
||||
There is also a "pull once, push many" mode as well:
|
||||
|
||||
* Override the ``download_run_once: True`` to download container images only once
|
||||
then push to cluster nodes in batches. The default delegate node
|
||||
for pushing images is the first `kube-master`.
|
||||
* If your ansible runner node (aka the admin node) has password-less sudo and
docker enabled, you may want to define ``download_localhost: True``, which
makes that node the delegate for pushing images while running the deployment with
ansible. This may be the case if cluster nodes cannot access each other via ssh
or you want to use local docker images as a cache for multiple clusters (see the sketch after this list).
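A sketch of such a run, reusing the inventory path and extra-vars style from the other docs:

```
# Pull binaries and container images once on the delegate (or the ansible runner
# itself) and push them to the cluster nodes during the normal deployment
ansible-playbook -i inventory/inventory.ini cluster.yml \
  -e download_run_once=true -e download_localhost=true
```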
|
||||
|
||||
Container images and binary files are described by the vars like ``foo_version``,
|
||||
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
|
||||
``foo_image_tag`` or optional ``foo_digest_checksum`` for containers.
|
||||
|
||||
Container images may be defined by their repo and tag, for example:
|
||||
`andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest:
|
||||
`andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.
|
||||
|
||||
Note, the sha256 digest and the image tag must be both specified and correspond
|
||||
to each other. The given example above is represented by the following vars:
|
||||
```
|
||||
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
|
||||
dnsmasq_image_repo: andyshinn/dnsmasq
|
||||
dnsmasq_image_tag: '2.72'
|
||||
```
|
||||
The full list of available vars may be found in the download's ansible role defaults.
|
||||
Those also allow specifying custom URLs and local repositories for binaries and container
images. See also the DNS stack docs for the related intranet configuration,
so that the hosts can resolve those URLs and repos.
|
||||
BIN  docs/figures/kargo-calico-rr.png  Normal file (binary file not shown; 40 KiB)
@@ -17,3 +17,16 @@ kargo aws --instances 3
|
||||
```
|
||||
kargo deploy --aws -u centos -n calico
|
||||
```
|
||||
|
||||
Building your own inventory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||
an example inventory located
|
||||
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
|
||||
|
||||
You can use an
|
||||
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_generator/inventory_generator.py)
|
||||
to create or modify an Ansible inventory. Currently, it is limited in
|
||||
functionality and is only used for making a basic Kargo cluster, but it does
|
||||
support creating large clusters.
|
||||
|
||||
@@ -5,10 +5,6 @@ The following components require highly available endpoints:
|
||||
* etcd cluster,
|
||||
* kube-apiserver service instances.
|
||||
|
||||
The former provides the
|
||||
[etcd-proxy](https://coreos.com/etcd/docs/latest/proxy.html) service to access
|
||||
the cluster members in HA fashion.
|
||||
|
||||
The latter relies on third-party reverse proxies, like Nginx or HAProxy, to
achieve the same goal.
|
||||
|
||||
@@ -49,13 +45,15 @@ type. The following diagram shows how traffic to the apiserver is directed.
|
||||
|
||||

|
||||
|
||||
..note:: Kubernetes master nodes still use insecure localhost access because
|
||||
Note: Kubernetes master nodes still use insecure localhost access because
|
||||
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
|
||||
services.
|
||||
services. This makes the backends receive unencrypted traffic, which may be a
security issue when interconnecting different nodes, or maybe not, if those
belong to an isolated management network without external access.
|
||||
|
||||
A user may opt to use an external loadbalancer (LB) instead. An external LB
|
||||
provides access for external clients, while the internal LB accepts client
|
||||
connections only to the localhost, similarly to the etcd-proxy HA endpoints.
|
||||
connections only to the localhost.
|
||||
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
|
||||
an example configuration for a HAProxy service acting as an external LB:
|
||||
```
|
||||
@@ -81,24 +79,19 @@ loadbalancer_apiserver:
|
||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
|
||||
the HAProxy service should as well be HA and requires a VIP management, which
|
||||
is out of scope of this doc.
|
||||
is out of scope of this doc. Specifying an external LB overrides any internal
|
||||
localhost LB configuration.
|
||||
|
||||
Specifying an external LB overrides any internal localhost LB configuration.
|
||||
Note that for this example, the `kubernetes-apiserver-http` endpoint
|
||||
has backends receiving unencrypted traffic, which may be a security issue
|
||||
when interconnecting different nodes, or maybe not, if those belong to the
|
||||
isolated management network without external access.
|
||||
|
||||
In order to achieve HA for HAProxy instances, those must be running on the
|
||||
each node in the `k8s-cluster` group as well, but require no VIP, thus
|
||||
no VIP management.
|
||||
Note: In order to achieve HA for the HAProxy instances, those must be running on
each node in the `k8s-cluster` group as well, but they require no VIP, thus
no VIP management.
|
||||
|
||||
Access endpoints are evaluated automagically, as follows:
|
||||
|
||||
| Endpoint type | kube-master | non-master |
|
||||
|------------------------------|---------------|---------------------|
|
||||
| Local LB | http://lc:p | http://lc:sp |
|
||||
| External LB, no internal | http://lc:p | https://lb:lp |
|
||||
| Local LB | http://lc:p | https://lc:sp |
|
||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB (default) | http://lc:p | https://m[0].aip:sp |
|
||||
|
||||
Where:
|
||||
|
||||
@@ -8,12 +8,24 @@ For large scale deployments, consider the following configuration changes:
|
||||
|
||||
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
||||
|
||||
* Override the ``download_run_once: true`` to download binaries and container
|
||||
images only once then push to nodes in batches.
|
||||
* Override the ``download_run_once: true`` and/or ``download_localhost: true``.
|
||||
See download modes for details.
|
||||
|
||||
* Adjust the `retry_stagger` global var as appropriate. It should provide sane
load on the delegate (the first K8s master node) when retrying failed
push or download operations.
|
||||
|
||||
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
|
||||
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
|
||||
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
|
||||
Please note that limits must always be greater than or equal to requests.
|
||||
|
||||
* Tune CPU/memory limits and requests. Those are located in roles' defaults
|
||||
and named like ``foo_memory_limit``, ``foo_memory_requests`` and
|
||||
``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s
will be submitted as 'M' when applied to ``docker run``, and K8s cpu units will
end up with the 'm' dropped for docker as well. This is required as docker does not
understand k8s units well.
|
||||
|
||||
For example, when deploying 200 nodes, you may want to run ansible with
|
||||
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
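Put together, such a run could look like this (inventory path as used in the other docs):

```
# 200-node example: more parallel forks, a longer SSH timeout and staggered retries
ansible-playbook -i inventory/inventory.ini cluster.yml \
  --forks=50 --timeout=600 -e retry_stagger=60
```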
|
||||
|
||||
41  docs/netcheck.md  Normal file
@@ -0,0 +1,41 @@
|
||||
Network Checker Application
|
||||
===========================
|
||||
|
||||
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
Network Checker Application from the third-party `l23network/mcp-netchecker` docker
images. It consists of a server and agents that try to reach the server through the
usual network connectivity means of Kubernetes applications. This
automagically verifies pod to pod connectivity via the cluster IP and checks
if DNS resolution is functioning as well.
|
||||
|
||||
The checks are run by agents on a periodic basis and cover standard and host network
|
||||
pods as well. The history of performed checks may be found in the agents' application
|
||||
logs.
|
||||
|
||||
To get the most recent and cluster-wide network connectivity report, run from
|
||||
any of the cluster nodes:
|
||||
```
|
||||
curl http://localhost:31081/api/v1/connectivity_check
|
||||
```
|
||||
Note that Kargo does not invoke the check but only deploys the application, if
|
||||
requested.
|
||||
|
||||
There are related application specific variables:
|
||||
```
|
||||
netchecker_port: 31081
|
||||
agent_report_interval: 15
|
||||
netcheck_namespace: default
|
||||
agent_img: "quay.io/l23network/mcp-netchecker-agent:v0.1"
|
||||
server_img: "quay.io/l23network/mcp-netchecker-server:v0.1"
|
||||
```
|
||||
|
||||
Note that the application verifies DNS resolution only for FQDNs built from the
combination of the ``netcheck_namespace.dns_domain`` vars, for example
``netchecker-service.default.cluster.local``. If you want to deploy the application
to a non-default namespace, make sure to also adjust the ``searchdomains`` var
so that the resulting search domain records contain that namespace, like:
|
||||
|
||||
```
|
||||
search: foospace.cluster.local default.cluster.local ...
|
||||
nameserver: ...
|
||||
```
|
||||
@@ -1,6 +1,10 @@
|
||||
Kargo's roadmap
|
||||
=================
|
||||
|
||||
### Kubeadm
|
||||
- Propose kubeadm as an option in order to set up the kubernetes cluster.
|
||||
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
|
||||
|
||||
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
|
||||
- the playbook would install and configure docker/rkt and the etcd cluster
|
||||
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
||||
@@ -33,6 +37,7 @@ Kargo's roadmap
|
||||
- test scale up cluster: +1 etcd, +1 master, +1 node
|
||||
|
||||
### Lifecycle
|
||||
- Adopt the kubeadm tool by delegating CM tasks it is capable of accomplishing well [#553](https://github.com/kubespray/kargo/issues/553)
|
||||
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
||||
- Drain worker node when shutting down/deleting an instance
|
||||
|
||||
@@ -41,6 +46,7 @@ Kargo's roadmap
|
||||
- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
|
||||
- Opencontrail
|
||||
- Canal
|
||||
- Cloud Provider native networking (instead of our network plugins)
|
||||
|
||||
### High availability
|
||||
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
|
||||
|
||||
54  docs/test_cases.md  Normal file
@@ -0,0 +1,54 @@
|
||||
Travis CI test matrix
|
||||
=====================
|
||||
|
||||
GCE instances
|
||||
-------------
|
||||
|
||||
Here is the test matrix for the Travis CI gates:
|
||||
|
||||
| Network plugin| OS type| GCE region| Nodes layout|
|
||||
|-------------------------|-------------------------|-------------------------|-------------------------|
|
||||
| canal| debian-8-kubespray| asia-east1-a| ha|
|
||||
| calico| debian-8-kubespray| europe-west1-c| default|
|
||||
| flannel| centos-7| asia-northeast1-c| default|
|
||||
| calico| centos-7| us-central1-b| ha|
|
||||
| weave| rhel-7| us-east1-c| default|
|
||||
| canal| coreos-stable| us-west1-b| default|
|
||||
| canal| rhel-7| asia-northeast1-b| separate|
|
||||
| weave| ubuntu-1604-xenial| europe-west1-d| separate|
|
||||
| calico| coreos-stable| us-central1-f| separate|
|
||||
|
||||
Where the nodes layout `default` is a non-HA two node setup with a separate `kube-node`
and the `etcd` group merged with the `kube-master`. The `separate` layout is when
there is only one node of each type: a kube master, a compute node and an etcd cluster member.
And the `ha` layout stands for two etcd nodes, two masters and a single worker node,
partially intersecting though.
|
||||
|
||||
Note, the canal network plugin deploys flannel as well as the calico policy controller.
|
||||
|
||||
Hint: the command
|
||||
```
|
||||
bash scripts/gen_matrix.sh
|
||||
```
|
||||
will (hopefully) generate the CI test cases from the current ``.travis.yml``.
|
||||
|
||||
Gitlab CI test matrix
|
||||
=====================
|
||||
|
||||
GCE instances
|
||||
-------------
|
||||
|
||||
| Stage| Network plugin| OS type| GCE region| Nodes layout
|
||||
|--------------------|--------------------|--------------------|--------------------|--------------------|
|
||||
| part1| calico| coreos-stable| us-west1-b| separated|
|
||||
| part1| canal| debian-8-kubespray| us-east1-b| ha|
|
||||
| part1| weave| rhel-7| europe-west1-b| default|
|
||||
| part2| flannel| centos-7| us-west1-a| default|
|
||||
| part2| calico| debian-8-kubespray| us-central1-b| default|
|
||||
| part2| canal| coreos-stable| us-east1-b| default|
|
||||
| special| canal| rhel-7| us-east1-b| separated|
|
||||
| special| weave| ubuntu-1604-xenial| us-central1-b| separated|
|
||||
| special| calico| centos-7| europe-west1-b| ha|
|
||||
| special| weave| coreos-alpha| us-west1-a| ha|
|
||||
|
||||
The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
|
||||
47  docs/upgrades.md  Normal file
@@ -0,0 +1,47 @@
|
||||
Upgrading Kubernetes in Kargo
|
||||
=============================
|
||||
|
||||
#### Description
|
||||
|
||||
Kargo handles upgrades the same way it handles initial deployment. That is to
|
||||
say that each component is laid down in a fixed order. You should be able to
|
||||
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
|
||||
also individually control versions of components by explicitly defining their
|
||||
versions. Here are all version vars for each component:
|
||||
|
||||
* docker_version
|
||||
* kube_version
|
||||
* etcd_version
|
||||
* calico_version
|
||||
* calico_cni_version
|
||||
* weave_version
|
||||
* flannel_version
|
||||
* kubedns_version
|
||||
|
||||
#### Example
|
||||
|
||||
If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
||||
deploy the following way:
|
||||
|
||||
```
|
||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
|
||||
```
|
||||
|
||||
And then repeat with v1.4.6 as kube_version:
|
||||
|
||||
```
|
||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
||||
```
|
||||
|
||||
#### Upgrade order
|
||||
|
||||
As mentioned above, components are upgraded in the order in which they were
|
||||
installed in the Ansible playbook. The order of component installation is as
|
||||
follows:
|
||||
|
||||
# Docker
|
||||
# etcd
|
||||
# kubelet and kube-proxy
|
||||
# network_plugin (such as Calico or Weave)
|
||||
# kube-apiserver, kube-scheduler, and kube-controller-manager
|
||||
# Add-ons (such as KubeDNS)
|
||||
100  docs/vars.md  Normal file
@@ -0,0 +1,100 @@
|
||||
Configurable Parameters in Kargo
|
||||
================================
|
||||
|
||||
#### Generic Ansible variables
|
||||
|
||||
You can view facts gathered by Ansible automatically
|
||||
[here](http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts).
|
||||
|
||||
Some variables of note include:
|
||||
|
||||
* *ansible_user*: user to connect to via SSH
|
||||
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
||||
Generated based on the output from the command ``ip -4 route get 8.8.8.8`` (see the example below)
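To check what Ansible picked for a host, the stock ``setup`` module can be filtered for that fact, for example:

```
# Show the default IPv4 fact Ansible gathered for every host in the inventory
ansible -i inventory/inventory.ini all -m setup -a 'filter=ansible_default_ipv4'
# The underlying heuristic, run on a node itself
ip -4 route get 8.8.8.8
```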
|
||||
|
||||
#### Common vars that are used in Kargo

* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to use (should be a quoted
  string)
* *etcd_version* - Specify version of etcd to use
* *ipip* - Enables Calico ipip encapsulation by default
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
  resides
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
* *kube_network_plugin* - Changes the k8s network plugin (e.g. to calico)
* *kube_proxy_mode* - Changes the k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes hyperkube version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *nameservers* - Array of nameservers to use for DNS lookup

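As a loose sketch, a handful of these can be overridden together in group variables; the values below are only illustrative:

```
# group_vars/all.yml -- illustrative values only
kube_version: v1.5.1
kube_network_plugin: calico
kube_proxy_mode: iptables
```
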
#### Addressing variables

* *ip* - IP to use for binding services (host var)
* *access_ip* - IP for other hosts to use to connect to. Often required when
  deploying from a cloud, such as OpenStack or GCE, where you have separate
  public/floating and private IPs.
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
  and access_ip are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
  address instead of localhost for kube-masters and kube-master[0] for
  kube-nodes. See more details in the
  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - If enabled, all hosts will connect to
  the apiserver's internally load balanced endpoint. See more details in the
  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).

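Because *ip* and *access_ip* are host vars, one hedged way to set them is per node, for example in host_vars; the hostname and addresses below are made up, mirroring the sample inventory later in this diff:

```
# host_vars/node1.yml -- hypothetical node and addresses
ip: 10.3.0.1           # private address that services bind to
access_ip: 95.54.0.12  # public/floating address other hosts use to reach this node
```
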
#### Cluster variables

Kubernetes needs some parameters in order to get deployed. These are the
following default cluster parameters:

* *cluster_name* - Name of cluster (default is cluster.local)
* *domain_name* - Name of cluster DNS domain (default is cluster.local)
* *kube_network_plugin* - Plugin to use for container networking
* *kube_service_addresses* - Subnet for cluster IPs (default is
  10.233.0.0/18). Must not overlap with kube_pods_subnet
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
  overlap with kube_service_addresses.
* *kube_network_node_prefix* - Subnet allocated per node for pod IPs. The remaining
  bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster.
* *dns_setup* - Enables dnsmasq
* *dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
  OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of the PetSet type in
  Kubernetes

Note: if the cloud provider makes any use of ``10.233.0.0/16``, for example for
instances' private addresses, make sure to pick other values for
``kube_service_addresses`` and ``kube_pods_subnet``, for example from ``172.18.0.0/16``.

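For instance, a minimal sketch of moving both networks out of ``10.233.0.0/16``; the exact ranges are only an illustration of the note above:

```
# group_vars/all.yml -- illustrative alternative ranges
kube_service_addresses: 172.18.0.0/18
kube_pods_subnet: 172.18.64.0/18
```
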
#### DNS variables

By default, dnsmasq gets set up with 8.8.8.8 as an upstream DNS server, and all
other settings from your existing /etc/resolv.conf are lost. Set the following
variables to match your requirements.

* *upstream_dns_servers* - Array of upstream DNS servers configured on the host in
  addition to the Kargo-deployed DNS
* *nameservers* - Array of DNS servers configured for use in dnsmasq
* *searchdomains* - Array of up to 4 search domains
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)

For more information, see
[DNS Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).

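A rough sketch of a customized DNS setup using the variables above; the addresses and domain are placeholders:

```
# group_vars/all.yml -- placeholder resolvers and domains
upstream_dns_servers:
  - 10.0.0.2            # e.g. an internal corporate resolver
nameservers:
  - 127.0.0.1           # a maximum of 2 entries is allowed here
searchdomains:
  - mycompany.example   # up to 4 search domains
skip_dnsmasq: false
```
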
#### Other service variables

* *docker_options* - Commonly used to set
  ``--insecure-registry=myregistry.mydomain:5000``
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
  proxy

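As a sketch, both can be combined in group variables; the registry and proxy endpoints are placeholders, and note that overriding *docker_options* replaces the default string (shown further down in this diff), so its defaults are repeated here:

```
# group_vars/all.yml -- placeholder endpoints
docker_options: "--insecure-registry=myregistry.mydomain:5000 --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
http_proxy: "http://proxy.mydomain:3128"
https_proxy: "http://proxy.mydomain:3128"
no_proxy: "localhost,127.0.0.1,.mydomain"
```
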
#### User accounts

Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to ``changeme``. You can change this by setting ``kube_api_pwd``.

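A minimal sketch of overriding that password, either in group variables as below or with `-e kube_api_pwd=...` on the command line; the value is a placeholder:

```
# group_vars/all.yml -- placeholder secret
kube_api_pwd: "use-a-real-secret-here"
```
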
@@ -1,9 +1,34 @@
# Valid bootstrap options (required): xenial, coreos, none
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system

# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.5.1

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
@@ -21,6 +46,11 @@ kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2

# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
# 1.5 default behavior. The flag is only added if the kubernetes version in use is >= 1.5
kube_api_anonymous_auth: false

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
@@ -33,8 +63,10 @@ kube_users:

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
ndots: 5
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
@@ -62,13 +94,14 @@ ndots: 5
# Enable multiaccess to configure clients to access all of the etcd members directly
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false
etcd_multiaccess: true

# Assume there are no internal loadbalancers for the apiservers; listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true

# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
@@ -123,21 +156,46 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')

# There are some changes specific to the cloud providers
# for instance we need to encapsulate packets with some network plugins
# If set the possible values are either 'gce', 'aws' or 'openstack'
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
# When openstack is used make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# When azure is used, you need to also set the following variables.
# cloud_provider:

# see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:


## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
docker_bin_dir: "/usr/bin"

## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# default packages to install within the cluster
kpm_packages: []
@@ -1,29 +1,31 @@
#[kube-master]
#node1 ansible_ssh_host=10.99.0.26
#node2 ansible_ssh_host=10.99.0.27
#
#[etcd]
#node1 ansible_ssh_host=10.99.0.26
#node2 ansible_ssh_host=10.99.0.27
#node3 ansible_ssh_host=10.99.0.4
#
#[kube-node]
#node2 ansible_ssh_host=10.99.0.27
#node3 ansible_ssh_host=10.99.0.4
#node4 ansible_ssh_host=10.99.0.5
#node5 ansible_ssh_host=10.99.0.36
#node6 ansible_ssh_host=10.99.0.37
#
#[paris]
#node1 ansible_ssh_host=10.99.0.26
#node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
#node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
#
#[new-york]
#node2 ansible_ssh_host=10.99.0.27
#node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
#node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
#
#[k8s-cluster:children]
#kube-node
#kube-master
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6

# ## configure a bastion host if your nodes are not publicly reachable
# bastion ansible_ssh_host=x.x.x.x

# [kube-master]
# node1
# node2

# [etcd]
# node1
# node2
# node3

# [kube-node]
# node2
# node3
# node4
# node5
# node6

# [k8s-cluster:children]
# kube-node
# kube-master

5	reset.yml	Normal file
@@ -0,0 +1,5 @@
---

- hosts: all
  roles:
    - { role: reset, tags: reset }
18	roles/bastion-ssh-config/tasks/main.yml	Normal file
@@ -0,0 +1,18 @@
---
- set_fact:
    has_bastion: "{{ 'bastion' in groups['all'] }}"

- set_fact:
    bastion_ip: "{{ hostvars['bastion']['ansible_ssh_host'] }}"
  when: has_bastion

# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_ssh_user in real_user
- set_fact:
    real_user: "{{ ansible_ssh_user }}"
  delegate_to: bastion
  when: has_bastion

- name: create ssh bastion conf
  become: false
  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
21	roles/bastion-ssh-config/templates/ssh-bastion.conf	Normal file
@@ -0,0 +1,21 @@
{% if has_bastion %}
{% set vars={'hosts': ''} %}
{% set user='' %}

{% for h in groups['all'] %}
{% if h != 'bastion' %}
{% if vars.update({'hosts': vars['hosts'] + ' ' + hostvars[h]['ansible_ssh_host']}) %}{% endif %}
{% endif %}
{% endfor %}

Host {{ bastion_ip }}
  Hostname {{ bastion_ip }}
  StrictHostKeyChecking no
  ControlMaster auto
  ControlPath ~/.ssh/ansible-%r@%h:%p
  ControlPersist 5m

Host {{ vars['hosts'] }}
  ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
  StrictHostKeyChecking no
{% endif %}
14	roles/bootstrap-os/tasks/bootstrap-centos.yml	Normal file
@@ -0,0 +1,14 @@
---

- name: Check presence of fastestmirror.conf
  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
  register: fastestmirror

# fastestmirror plugin actually slows down Ansible deployments
- name: Disable fastestmirror plugin
  lineinfile:
    dest: /etc/yum/pluginconf.d/fastestmirror.conf
    regexp: "^enabled=.*"
    line: "enabled=0"
    state: present
  when: fastestmirror.stat.exists
@@ -2,8 +2,8 @@
- name: Bootstrap | Check if bootstrap is needed
  raw: stat /opt/bin/.bootstrapped
  register: need_bootstrap
  ignore_errors: True

  failed_when: false
  tags: facts

- name: Bootstrap | Run bootstrap.sh
  script: bootstrap.sh
@@ -11,13 +11,15 @@

- set_fact:
    ansible_python_interpreter: "/opt/bin/python"
  tags: facts

- name: Bootstrap | Check if we need to install pip
  shell: "{{ansible_python_interpreter}} -m pip --version"
  register: need_pip
  ignore_errors: True
  failed_when: false
  changed_when: false
  when: (need_bootstrap | failed)
  tags: facts

- name: Bootstrap | Copy get-pip.py
  copy: src=get-pip.py dest=~/get-pip.py

@@ -4,11 +4,13 @@
- name: Bootstrap | Check if bootstrap is needed
  raw: which python
  register: need_bootstrap
  ignore_errors: True
  failed_when: false
  tags: facts

- name: Bootstrap | Install python 2.x
  raw: DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
  raw: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
  when: need_bootstrap | failed

- set_fact:
    ansible_python_interpreter: "/usr/bin/python"
  tags: facts

@@ -4,3 +4,8 @@

- include: bootstrap-coreos.yml
  when: bootstrap_os == "coreos"

- include: bootstrap-centos.yml
  when: bootstrap_os == "centos"

- include: setup-pipelining.yml
6	roles/bootstrap-os/tasks/setup-pipelining.yml	Normal file
@@ -0,0 +1,6 @@
---
# Remove requiretty to make ssh pipelining work

- name: Remove require tty
  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent

@@ -10,3 +10,19 @@
# Max of 2 is allowed here (a 1 is reserved for the dns_server)
#nameservers:
# - 127.0.0.1

# Versions
dnsmasq_version: 2.72

# Images
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"

# Skip dnsmasq setup
skip_dnsmasq: false

# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi

@@ -1,34 +0,0 @@
- name: Dnsmasq | restart network
  command: /bin/true
  notify:
    - Dnsmasq | reload network
    - Dnsmasq | update resolvconf
  when: ansible_os_family != "CoreOS"

- name: Dnsmasq | reload network
  service:
    name: >-
      {% if ansible_os_family == "RedHat" -%}
      network
      {%- elif ansible_os_family == "Debian" -%}
      networking
      {%- endif %}
    state: restarted
  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"

- name: Dnsmasq | update resolvconf
  command: /bin/true
  notify:
    - Dnsmasq | reload resolvconf
    - Dnsmasq | reload kubelet

- name: Dnsmasq | reload resolvconf
  command: /sbin/resolvconf -u
  ignore_errors: true

- name: Dnsmasq | reload kubelet
  service:
    name: kubelet
    state: restarted
  when: "{{ inventory_hostname in groups['kube-master'] }}"
  ignore_errors: true
6	roles/dnsmasq/meta/main.yml	Normal file
@@ -0,0 +1,6 @@
---
dependencies:
  - role: download
    file: "{{ downloads.dnsmasq }}"
    when: not skip_dnsmasq|default(false) and download_localhost|default(false)
    tags: [download, dnsmasq]
@@ -1,58 +0,0 @@
---
- name: ensure dnsmasq.d directory exists
  file:
    path: /etc/dnsmasq.d
    state: directory

- name: ensure dnsmasq.d-available directory exists
  file:
    path: /etc/dnsmasq.d-available
    state: directory

- name: Write dnsmasq configuration
  template:
    src: 01-kube-dns.conf.j2
    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
    mode: 0755
    backup: yes

- name: Stat dnsmasq configuration
  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
  register: sym

- name: Move previous configuration
  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
  changed_when: False
  when: sym.stat.islnk is defined and sym.stat.islnk == False

- name: Enable dnsmasq configuration
  file:
    src: /etc/dnsmasq.d-available/01-kube-dns.conf
    dest: /etc/dnsmasq.d/01-kube-dns.conf
    state: link

- name: Create dnsmasq manifests
  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
  with_items:
    - {file: dnsmasq-ds.yml, type: ds}
    - {file: dnsmasq-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Start Resources
  kube:
    name: dnsmasq
    namespace: kube-system
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: /etc/kubernetes/{{item.item.file}}
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]

- name: Check for dnsmasq port (pulling image and running container)
  wait_for:
    host: "{{dns_server}}"
    port: 53
    delay: 5
  when: inventory_hostname == groups['kube-node'][0]
@@ -1,5 +1,61 @@
---
- include: dnsmasq.yml
  when: "{{ not skip_dnsmasq|bool }}"
- name: ensure dnsmasq.d directory exists
  file:
    path: /etc/dnsmasq.d
    state: directory
  tags: bootstrap-os

- include: resolvconf.yml
- name: ensure dnsmasq.d-available directory exists
  file:
    path: /etc/dnsmasq.d-available
    state: directory
  tags: bootstrap-os

- name: Write dnsmasq configuration
  template:
    src: 01-kube-dns.conf.j2
    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
    mode: 0755
    backup: yes

- name: Stat dnsmasq configuration
  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
  register: sym

- name: Move previous configuration
  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
  changed_when: False
  when: sym.stat.islnk is defined and sym.stat.islnk == False

- name: Enable dnsmasq configuration
  file:
    src: /etc/dnsmasq.d-available/01-kube-dns.conf
    dest: /etc/dnsmasq.d/01-kube-dns.conf
    state: link

- name: Create dnsmasq manifests
  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
  with_items:
    - {file: dnsmasq-ds.yml, type: ds}
    - {file: dnsmasq-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Start Resources
  kube:
    name: dnsmasq
    namespace: "{{system_namespace}}"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]

- name: Check for dnsmasq port (pulling image and running container)
  wait_for:
    host: "{{dns_server}}"
    port: 53
    delay: 5
  when: inventory_hostname == groups['kube-node'][0]
  tags: facts

@@ -1,101 +0,0 @@
---
- name: check resolvconf
  shell: which resolvconf
  register: resolvconf
  ignore_errors: yes

- name: target resolv.conf file
  set_fact:
    resolvconffile: >-
      {%- if resolvconf.rc == 0 -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}

- name: generate search domains to resolvconf
  set_fact:
    searchentries:
      "{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}"

- name: pick dnsmasq cluster IP
  set_fact:
    dnsmasq_server: >-
      {%- if skip_dnsmasq|bool -%}{{ [ skydns_server ] + upstream_dns_servers|default([]) }}{%- else -%}{{ [ dns_server ] }}{%- endif -%}

- name: generate nameservers to resolvconf
  set_fact:
    nameserverentries:
      "{{ dnsmasq_server|default([]) + nameservers|default([]) }}"

- name: Remove search and nameserver options from resolvconf head
  lineinfile:
    dest: /etc/resolvconf/resolv.conf.d/head
    state: absent
    regexp: "^{{ item }}.*$"
    backup: yes
    follow: yes
  with_items:
    - search
    - nameserver
  when: resolvconf.rc == 0
  notify: Dnsmasq | update resolvconf

- name: Add search domains to resolv.conf
  lineinfile:
    line: "search {{searchentries}}"
    dest: "{{resolvconffile}}"
    state: present
    insertbefore: BOF
    backup: yes
    follow: yes
  notify: Dnsmasq | update resolvconf

- name: Add nameservers to resolv.conf
  blockinfile:
    dest: "{{resolvconffile}}"
    block: |-
      {% for item in nameserverentries -%}
      nameserver {{ item }}
      {% endfor %}
    state: present
    insertafter: "^search.*$"
    create: yes
    backup: yes
    follow: yes
    marker: "# Ansible nameservers {mark}"
  notify: Dnsmasq | update resolvconf

- name: Add options to resolv.conf
  lineinfile:
    line: options {{ item }}
    dest: "{{resolvconffile}}"
    state: present
    regexp: "^options.*{{ item }}$"
    insertafter: EOF
    backup: yes
    follow: yes
  with_items:
    - ndots:{{ ndots }}
    - timeout:2
    - attempts:2
  notify: Dnsmasq | update resolvconf

- name: Remove search and nameserver options from resolvconf base
  lineinfile:
    dest: /etc/resolvconf/resolv.conf.d/base
    state: absent
    regexp: "^{{ item }}.*$"
    backup: yes
    follow: yes
  with_items:
    - search
    - nameserver
  when: resolvconf.rc == 0
  notify: Dnsmasq | update resolvconf

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/znodnsupdate mode=0755
  notify: Dnsmasq | restart network
  when: ansible_os_family == "Debian"

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x
  notify: Dnsmasq | restart network
  when: ansible_os_family == "RedHat"
@@ -7,19 +7,21 @@ addn-hosts=/etc/hosts
strict-order
# Forward k8s domain to kube-dns
server=/{{ dns_domain }}/{{ skydns_server }}
# Reply NXDOMAIN to bogus domain requests like com.cluster.local.cluster.local
local=/{{ bogus_domains }}

# Set upstream dns servers
{% if upstream_dns_servers is defined %}
{% for srv in upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% elif cloud_provider == "gce" %}
server=169.254.169.254
{% else %}
server=8.8.8.8
server=8.8.4.4
server={{ default_resolver }}
{% endif %}

{% if kube_log_level == '4' %}
log-queries
{% endif %}
bogus-priv
no-resolv
no-negcache
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: dnsmasq
  namespace: kube-system
  namespace: "{{system_namespace}}"
  labels:
    k8s-app: dnsmasq
spec:
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
        - name: dnsmasq
          image: andyshinn/dnsmasq:2.72
          image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
          imagePullPolicy: {{ k8s_image_pull_policy }}
          command:
            - dnsmasq
          args:
@@ -28,8 +29,11 @@ spec:
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 100m
              memory: 256M
              cpu: {{ dns_cpu_limit }}
              memory: {{ dns_memory_limit }}
            requests:
              cpu: {{ dns_cpu_requests }}
              memory: {{ dns_memory_requests }}
          ports:
            - name: dns
              containerPort: 53
Some files were not shown because too many files have changed in this diff.