mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 22:04:43 +03:00
Compare commits
412 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5fd2b151b9 | ||
|
|
3c107ef4dc | ||
|
|
a5f93d6013 | ||
|
|
38338e848d | ||
|
|
e9518072a8 | ||
|
|
10dbd0afbd | ||
|
|
1dce56e2f8 | ||
|
|
1f0b2eac12 | ||
|
|
d9539e0f27 | ||
|
|
0909368339 | ||
|
|
091b634ea1 | ||
|
|
d18804b0bb | ||
|
|
a8b5b856d1 | ||
|
|
1d2a18b355 | ||
|
|
4a59340182 | ||
|
|
aa33613b98 | ||
|
|
96372c15e2 | ||
|
|
f365b32c60 | ||
|
|
5af2c42bde | ||
|
|
c0400e9db5 | ||
|
|
f7447837c5 | ||
|
|
a4dbee3e38 | ||
|
|
fb7899aa06 | ||
|
|
6d54d9f49a | ||
|
|
6546869c42 | ||
|
|
aa79a02f9c | ||
|
|
447febcdd6 | ||
|
|
61732847b6 | ||
|
|
fcd9d97f10 | ||
|
|
b6b5d52f78 | ||
|
|
4b6f29d5e1 | ||
|
|
f5d5230034 | ||
|
|
8dc19374cc | ||
|
|
a8f2af0503 | ||
|
|
d8a2941e9e | ||
|
|
55b6d0bbdd | ||
|
|
a3c044b657 | ||
|
|
4a2abc1a46 | ||
|
|
410c78f2e5 | ||
|
|
3b5830a1cf | ||
|
|
ab7df10a7d | ||
|
|
93663e987c | ||
|
|
6114266b84 | ||
|
|
97f96a6376 | ||
|
|
58062be2a3 | ||
|
|
031cf565ec | ||
|
|
5ec4efe88e | ||
|
|
e02aae71a1 | ||
|
|
1f9f885379 | ||
|
|
80509673d2 | ||
|
|
b902110d75 | ||
|
|
2c23027794 | ||
|
|
15589dd88f | ||
|
|
1a7f52c889 | ||
|
|
24cbf2287c | ||
|
|
a56d9de502 | ||
|
|
95e14ffb54 | ||
|
|
6139ee3add | ||
|
|
f0c0390646 | ||
|
|
e7a1949d85 | ||
|
|
ff8cb46bb9 | ||
|
|
399cb9707a | ||
|
|
6d9cd2d720 | ||
|
|
622537bd33 | ||
|
|
9169f840c2 | ||
|
|
79996b557b | ||
|
|
be8e5e1fdc | ||
|
|
bb0c3537cb | ||
|
|
36a5143478 | ||
|
|
7b86b87dca | ||
|
|
53affb9bc0 | ||
|
|
0fe2b66097 | ||
|
|
385f7f6e75 | ||
|
|
9f1e3db906 | ||
|
|
b63d900625 | ||
|
|
ac295de64c | ||
|
|
111571b67a | ||
|
|
a4bce333a3 | ||
|
|
c53a6eca86 | ||
|
|
7c2785e083 | ||
|
|
aab4149ab0 | ||
|
|
89a4b92753 | ||
|
|
5414a410bd | ||
|
|
ad796d188d | ||
|
|
de8cd5cd7f | ||
|
|
cc93c4fe12 | ||
|
|
c456a311d6 | ||
|
|
ed4b4b8482 | ||
|
|
8d9f207836 | ||
|
|
2a3164e040 | ||
|
|
f10d1327d4 | ||
|
|
d314174149 | ||
|
|
9885fe73dc | ||
|
|
f2cf323ecf | ||
|
|
cf4f2b4f14 | ||
|
|
fbc13ea6dc | ||
|
|
b8bc8eee41 | ||
|
|
11380769cd | ||
|
|
ee62c99eb1 | ||
|
|
843d439898 | ||
|
|
8d5da5cfca | ||
|
|
5a2c75a3cb | ||
|
|
c1e4cef75b | ||
|
|
5d73b9ccc5 | ||
|
|
9efe1fe09d | ||
|
|
4bbec963e6 | ||
|
|
348fc5b109 | ||
|
|
101864c050 | ||
|
|
fe150d4e4d | ||
|
|
048ac264a3 | ||
|
|
add7570a94 | ||
|
|
db77bd9588 | ||
|
|
768fe05eea | ||
|
|
1c48a001df | ||
|
|
a7276901a3 | ||
|
|
b0fa189b3c | ||
|
|
cc57152cc0 | ||
|
|
046f3eebcb | ||
|
|
ea874899c7 | ||
|
|
1782d19e1f | ||
|
|
e2476fbd0b | ||
|
|
07cd81ef58 | ||
|
|
92f542938c | ||
|
|
9df2306ee9 | ||
|
|
495d0b659a | ||
|
|
a2f8f17270 | ||
|
|
0e2329b59e | ||
|
|
7c011b14df | ||
|
|
ad68b23d8a | ||
|
|
670d977dfb | ||
|
|
70143d87bf | ||
|
|
e21ca5433a | ||
|
|
68ad4ff4d9 | ||
|
|
d7b0ff3de6 | ||
|
|
725f9ea3bd | ||
|
|
a9684648ab | ||
|
|
6b1dfa4ae6 | ||
|
|
9cc73bdf08 | ||
|
|
114ab5e4e6 | ||
|
|
77ebf4531c | ||
|
|
1551fe01f9 | ||
|
|
29874baf8a | ||
|
|
e6fe9d5807 | ||
|
|
81317505eb | ||
|
|
d57c27ffcf | ||
|
|
8d7b25d4f0 | ||
|
|
8e809aed01 | ||
|
|
b4c87c669b | ||
|
|
bca704e7e9 | ||
|
|
d50eb60827 | ||
|
|
dbd9aaf1ea | ||
|
|
d20d5e648f | ||
|
|
96640e68e2 | ||
|
|
3e007df97c | ||
|
|
06584ee3aa | ||
|
|
26e3142c95 | ||
|
|
33585fa673 | ||
|
|
665ce82d71 | ||
|
|
fb78bfaaae | ||
|
|
b4ce221002 | ||
|
|
444b1dafdc | ||
|
|
d6174b22e9 | ||
|
|
c75f394707 | ||
|
|
94ce99eb0a | ||
|
|
0515814e0c | ||
|
|
c87f4f613e | ||
|
|
f12e9fa22a | ||
|
|
3ca11b70c4 | ||
|
|
1cfaf927c9 | ||
|
|
45135ad3e4 | ||
|
|
9c06dd2863 | ||
|
|
b2088b72dd | ||
|
|
4e721bfd9d | ||
|
|
f52ed9f91e | ||
|
|
88f3b86410 | ||
|
|
3117858dcd | ||
|
|
8c36915ea0 | ||
|
|
5176e5c968 | ||
|
|
e95c733a81 | ||
|
|
15c2919ecc | ||
|
|
774f4dbbf7 | ||
|
|
b1e852a785 | ||
|
|
42ea4d2cfd | ||
|
|
9fd14cb6ea | ||
|
|
4e34803b1e | ||
|
|
7abcf6e0b9 | ||
|
|
e5ad0836bc | ||
|
|
2c50f20429 | ||
|
|
a15d626771 | ||
|
|
fd9b26675e | ||
|
|
eb33f085b6 | ||
|
|
fb774d4317 | ||
|
|
459bee6d2c | ||
|
|
6e080cd9b0 | ||
|
|
8a5ba6b20c | ||
|
|
c3ec3ff902 | ||
|
|
284a21012c | ||
|
|
7897c34ba3 | ||
|
|
8cc84e132a | ||
|
|
00ad151186 | ||
|
|
4265149463 | ||
|
|
ee8d6ab4fc | ||
|
|
a80745b5bd | ||
|
|
bd3f2d5cef | ||
|
|
e9c591e6de | ||
|
|
710d5ae48e | ||
|
|
fc769eb870 | ||
|
|
eec2ed5809 | ||
|
|
f7dd20f21c | ||
|
|
bfc9bcb8c7 | ||
|
|
8eb26c21be | ||
|
|
3c66e4cdba | ||
|
|
f0f2b81276 | ||
|
|
45ed6de315 | ||
|
|
c9290182be | ||
|
|
893538d8e6 | ||
|
|
246c8209c1 | ||
|
|
36fe2cb5ea | ||
|
|
9d6cc3a8d5 | ||
|
|
8870178a2d | ||
|
|
b0079ccd77 | ||
|
|
1772d122b2 | ||
|
|
756ae926ba | ||
|
|
2c1db56213 | ||
|
|
d672cef21c | ||
|
|
27e239c8d6 | ||
|
|
f1d7af11ee | ||
|
|
59a097b255 | ||
|
|
d40783022b | ||
|
|
7a3a473ccf | ||
|
|
2cdf752481 | ||
|
|
26f93feb2d | ||
|
|
d4aba0af48 | ||
|
|
42d12afbc6 | ||
|
|
022468ae3e | ||
|
|
3bb42cc66a | ||
|
|
8b5b27bb51 | ||
|
|
7328e0e1ac | ||
|
|
eeaf2ea4cf | ||
|
|
42eb8e4663 | ||
|
|
c13d0db0cc | ||
|
|
dba2026002 | ||
|
|
a62f74259c | ||
|
|
a2331fec55 | ||
|
|
b6872a0be3 | ||
|
|
bc7a73ca2c | ||
|
|
c405944e9d | ||
|
|
7eab889c07 | ||
|
|
bb55f68f95 | ||
|
|
658543c949 | ||
|
|
5b382668f5 | ||
|
|
b7692fad09 | ||
|
|
fbdda81515 | ||
|
|
7484888e42 | ||
|
|
f783a638a3 | ||
|
|
2d18e19263 | ||
|
|
ff7d489f2d | ||
|
|
6d29a5981c | ||
|
|
10b75d1d51 | ||
|
|
aa447585c4 | ||
|
|
f6c32c3ea3 | ||
|
|
d208896c46 | ||
|
|
08506f5139 | ||
|
|
2c4b11f321 | ||
|
|
d890d2f277 | ||
|
|
793f3990a0 | ||
|
|
9d439d2e5b | ||
|
|
db03f17486 | ||
|
|
dff78f616e | ||
|
|
d3a4d8dc24 | ||
|
|
dc58159d16 | ||
|
|
b60d5647a2 | ||
|
|
2bcfb3fea3 | ||
|
|
66f27ed1f3 | ||
|
|
cb84b93930 | ||
|
|
32a5453473 | ||
|
|
97d126ac8b | ||
|
|
deea7bb87b | ||
|
|
1bd1825ecb | ||
|
|
20e36191bb | ||
|
|
769566f36c | ||
|
|
ddd230485b | ||
|
|
5ee0cbaa42 | ||
|
|
ff675d40f9 | ||
|
|
0eebe43c08 | ||
|
|
069636e5b4 | ||
|
|
a03540dabc | ||
|
|
f6d69d0a00 | ||
|
|
cc2f26b8e9 | ||
|
|
3e687bbe9a | ||
|
|
c5113d3352 | ||
|
|
4d9712a3ef | ||
|
|
5b9b2c0973 | ||
|
|
a5af87758a | ||
|
|
8b11de5425 | ||
|
|
ff928e0e66 | ||
|
|
952191db99 | ||
|
|
61adca2a6d | ||
|
|
9872ed4bb2 | ||
|
|
3aa2d56da9 | ||
|
|
6a398724b6 | ||
|
|
af3823bced | ||
|
|
1e601bb2ef | ||
|
|
e4d240b1b7 | ||
|
|
e3470b28c5 | ||
|
|
e9a48770a7 | ||
|
|
0322b69f63 | ||
|
|
e587e82f7f | ||
|
|
5f5199bf53 | ||
|
|
876c4df1b6 | ||
|
|
e68ec257a3 | ||
|
|
216e0b2a52 | ||
|
|
ab0ff2ab3c | ||
|
|
5cd65f9c45 | ||
|
|
4e47c267fb | ||
|
|
cb47bbf753 | ||
|
|
c41d200a95 | ||
|
|
771d537ff3 | ||
|
|
8ca1f4ce44 | ||
|
|
625ec529ff | ||
|
|
caa81f3ac2 | ||
|
|
8092f57695 | ||
|
|
965a1234d3 | ||
|
|
15bc445a9c | ||
|
|
bb72de0dc9 | ||
|
|
6da0ecfa55 | ||
|
|
1ccc10baf8 | ||
|
|
45c2900e71 | ||
|
|
eb583dd2f3 | ||
|
|
f6233ffc9a | ||
|
|
46ee9faca9 | ||
|
|
f320b79c0c | ||
|
|
6cc05c103a | ||
|
|
88577b9889 | ||
|
|
5821f9748a | ||
|
|
c58bd33af7 | ||
|
|
cf7c60029b | ||
|
|
046e315bfd | ||
|
|
251800eb16 | ||
|
|
fe16fecd8f | ||
|
|
9ea9604b3f | ||
|
|
a32cd85eb7 | ||
|
|
95b460ae94 | ||
|
|
57e467c03c | ||
|
|
764a2fd5a8 | ||
|
|
d197130148 | ||
|
|
39d68822ed | ||
|
|
4ece73d432 | ||
|
|
60a217766f | ||
|
|
309240cd6f | ||
|
|
6b0d26ddf0 | ||
|
|
5aa8df163e | ||
|
|
881dc8172c | ||
|
|
aff441a01f | ||
|
|
44a14d0b3e | ||
|
|
f106bf5bc4 | ||
|
|
39b8336f3f | ||
|
|
a6bc284abd | ||
|
|
6b7b8a2303 | ||
|
|
8f20d90f88 | ||
|
|
047f098660 | ||
|
|
3b2554217b | ||
|
|
672d50393c | ||
|
|
d4467ab1c6 | ||
|
|
ebeb57ee7c | ||
|
|
f9355ea14d | ||
|
|
2ca6819cdf | ||
|
|
437372021d | ||
|
|
78ac01add7 | ||
|
|
3b3938c6a6 | ||
|
|
36fc05d2fd | ||
|
|
7abc747b56 | ||
|
|
9f976e568d | ||
|
|
9d7142f476 | ||
|
|
50f77cca1d | ||
|
|
33ebf124c4 | ||
|
|
03e162b342 | ||
|
|
d8b06f3e2f | ||
|
|
d6f206b5fd | ||
|
|
357a15ffd4 | ||
|
|
a3f892c76c | ||
|
|
2778ac61a4 | ||
|
|
c7b00caeaa | ||
|
|
7fe255e5bb | ||
|
|
93f7a26896 | ||
|
|
3d617fbf88 | ||
|
|
c59c3a1bcf | ||
|
|
4c0bf6225a | ||
|
|
b11662a887 | ||
|
|
11f1f71b3b | ||
|
|
0e9d1e09e3 | ||
|
|
65d2a3b0e5 | ||
|
|
8165da3f3d | ||
|
|
4b7347f1cd | ||
|
|
e6902d8ecc | ||
|
|
a5137affeb | ||
|
|
a423927ac9 | ||
|
|
31c2922752 | ||
|
|
7e81855e24 | ||
|
|
2510092599 | ||
|
|
6113a3f350 | ||
|
|
7d6fc1d680 | ||
|
|
91a101c855 | ||
|
|
1de127470f | ||
|
|
40de468413 | ||
|
|
c402feffbd | ||
|
|
f74d6b084b | ||
|
|
dd022f2dbc | ||
|
|
19928dea2b | ||
|
|
21273926ce | ||
|
|
c03bab3246 | ||
|
|
71347322d6 |
47
.github/ISSUE_TEMPLATE.md
vendored
Normal file
47
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
|
||||||
|
|
||||||
|
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
|
||||||
|
|
||||||
|
<!--
|
||||||
|
If this is a BUG REPORT, please:
|
||||||
|
- Fill in as much of the template below as you can. If you leave out
|
||||||
|
information, we can't help you as well.
|
||||||
|
|
||||||
|
If this is a FEATURE REQUEST, please:
|
||||||
|
- Describe *in detail* the feature/behavior/change you'd like to see.
|
||||||
|
|
||||||
|
In both cases, be ready for followup questions, and please respond in a timely
|
||||||
|
manner. If we can't reproduce a bug or think a feature already exists, we
|
||||||
|
might close your issue. If we're wrong, PLEASE feel free to reopen it and
|
||||||
|
explain why.
|
||||||
|
-->
|
||||||
|
|
||||||
|
**Environment**:
|
||||||
|
- **Cloud provider or hardware configuration:**
|
||||||
|
|
||||||
|
- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**
|
||||||
|
|
||||||
|
- **Version of Ansible** (`ansible --version`):
|
||||||
|
|
||||||
|
|
||||||
|
**Kargo version (commit) (`git rev-parse --short HEAD`):**
|
||||||
|
|
||||||
|
|
||||||
|
**Network plugin used**:
|
||||||
|
|
||||||
|
|
||||||
|
**Copy of your inventory file:**
|
||||||
|
|
||||||
|
|
||||||
|
**Command used to invoke ansible**:
|
||||||
|
|
||||||
|
|
||||||
|
**Output of ansible run**:
|
||||||
|
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
|
||||||
|
|
||||||
|
**Anything else do we need to know**:
|
||||||
|
<!-- By running scripts/collect-info.yaml you can get a lot of useful informations.
|
||||||
|
Script can be started by:
|
||||||
|
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
|
||||||
|
(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
|
||||||
|
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here.-->
|
||||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -3,3 +3,11 @@
|
|||||||
inventory/vagrant_ansible_inventory
|
inventory/vagrant_ansible_inventory
|
||||||
temp
|
temp
|
||||||
.idea
|
.idea
|
||||||
|
.tox
|
||||||
|
.cache
|
||||||
|
*.egg-info
|
||||||
|
*.pyc
|
||||||
|
*.pyo
|
||||||
|
*.tfstate
|
||||||
|
*.tfstate.backup
|
||||||
|
/ssh-bastion.conf
|
||||||
|
|||||||
455
.gitlab-ci.yml
Normal file
455
.gitlab-ci.yml
Normal file
@@ -0,0 +1,455 @@
|
|||||||
|
stages:
|
||||||
|
- unit-tests
|
||||||
|
- deploy-gce-part1
|
||||||
|
- deploy-gce-part2
|
||||||
|
- deploy-gce-special
|
||||||
|
|
||||||
|
variables:
|
||||||
|
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||||
|
# DOCKER_HOST: tcp://localhost:2375
|
||||||
|
ANSIBLE_FORCE_COLOR: "true"
|
||||||
|
|
||||||
|
# asia-east1-a
|
||||||
|
# asia-northeast1-a
|
||||||
|
# europe-west1-b
|
||||||
|
# us-central1-a
|
||||||
|
# us-east1-b
|
||||||
|
# us-west1-a
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
- pip install ansible
|
||||||
|
- pip install netaddr
|
||||||
|
- pip install apache-libcloud==0.20.1
|
||||||
|
- pip install boto==2.9.0
|
||||||
|
- mkdir -p /.ssh
|
||||||
|
- cp tests/ansible.cfg .
|
||||||
|
|
||||||
|
.job: &job
|
||||||
|
tags:
|
||||||
|
- kubernetes
|
||||||
|
- docker
|
||||||
|
image: quay.io/ant31/kargo:master
|
||||||
|
|
||||||
|
.docker_service: &docker_service
|
||||||
|
services:
|
||||||
|
- docker:dind
|
||||||
|
|
||||||
|
.create_cluster: &create_cluster
|
||||||
|
<<: *job
|
||||||
|
<<: *docker_service
|
||||||
|
|
||||||
|
.gce_variables: &gce_variables
|
||||||
|
GCE_USER: travis
|
||||||
|
SSH_USER: $GCE_USER
|
||||||
|
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||||
|
CONTAINER_ENGINE: docker
|
||||||
|
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||||
|
GS_ACCESS_KEY_ID: $GS_KEY
|
||||||
|
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||||
|
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||||
|
BOOTSTRAP_OS: none
|
||||||
|
RESOLVCONF_MODE: docker_dns
|
||||||
|
LOG_LEVEL: "-vv"
|
||||||
|
ETCD_DEPLOYMENT: "docker"
|
||||||
|
KUBELET_DEPLOYMENT: "docker"
|
||||||
|
MAGIC: "ci check this"
|
||||||
|
|
||||||
|
.gce: &gce
|
||||||
|
<<: *job
|
||||||
|
<<: *docker_service
|
||||||
|
cache:
|
||||||
|
key: "$CI_BUILD_REF_NAME"
|
||||||
|
paths:
|
||||||
|
- downloads/
|
||||||
|
- $HOME/.cache
|
||||||
|
stage: deploy-gce
|
||||||
|
before_script:
|
||||||
|
- docker info
|
||||||
|
- pip install ansible==2.1.3.0
|
||||||
|
- pip install netaddr
|
||||||
|
- pip install apache-libcloud==0.20.1
|
||||||
|
- pip install boto==2.9.0
|
||||||
|
- mkdir -p /.ssh
|
||||||
|
- cp tests/ansible.cfg .
|
||||||
|
- mkdir -p $HOME/.ssh
|
||||||
|
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||||
|
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||||
|
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
||||||
|
- chmod 400 $HOME/.ssh/id_rsa
|
||||||
|
- ansible-playbook --version
|
||||||
|
- cp tests/ansible.cfg .
|
||||||
|
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||||
|
script:
|
||||||
|
- pwd
|
||||||
|
- ls
|
||||||
|
- echo ${PWD}
|
||||||
|
- >
|
||||||
|
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
||||||
|
-e mode=${CLUSTER_MODE}
|
||||||
|
-e test_id=${TEST_ID}
|
||||||
|
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
|
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||||
|
-e cloud_image=${CLOUD_IMAGE}
|
||||||
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
|
-e cloud_region=${CLOUD_REGION}
|
||||||
|
|
||||||
|
# Create cluster
|
||||||
|
- >
|
||||||
|
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||||
|
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
|
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||||
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
|
-e download_run_once=true
|
||||||
|
-e download_localhost=true
|
||||||
|
-e deploy_netchecker=true
|
||||||
|
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||||
|
-e local_release_dir=${PWD}/downloads
|
||||||
|
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||||
|
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||||
|
cluster.yml
|
||||||
|
|
||||||
|
|
||||||
|
# Tests Cases
|
||||||
|
## Test Master API
|
||||||
|
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||||
|
|
||||||
|
## Ping the between 2 pod
|
||||||
|
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||||
|
|
||||||
|
## Advanced DNS checks
|
||||||
|
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||||
|
|
||||||
|
after_script:
|
||||||
|
- >
|
||||||
|
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||||
|
-e mode=${CLUSTER_MODE}
|
||||||
|
-e test_id=${TEST_ID}
|
||||||
|
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
|
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||||
|
-e cloud_image=${CLOUD_IMAGE}
|
||||||
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
|
-e cloud_region=${CLOUD_REGION}
|
||||||
|
|
||||||
|
# Test matrix. Leave the comments for markup scripts.
|
||||||
|
.coreos_calico_sep_variables: &coreos_calico_sep_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
KUBE_NETWORK_PLUGIN: calico
|
||||||
|
CLOUD_IMAGE: coreos-stable
|
||||||
|
CLOUD_REGION: us-west1-b
|
||||||
|
CLUSTER_MODE: separated
|
||||||
|
BOOTSTRAP_OS: coreos
|
||||||
|
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||||
|
|
||||||
|
.debian8_canal_ha_variables: &debian8_canal_ha_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
KUBE_NETWORK_PLUGIN: canal
|
||||||
|
CLOUD_IMAGE: debian-8-kubespray
|
||||||
|
CLOUD_REGION: us-east1-b
|
||||||
|
CLUSTER_MODE: ha
|
||||||
|
|
||||||
|
.rhel7_weave_variables: &rhel7_weave_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
KUBE_NETWORK_PLUGIN: weave
|
||||||
|
CLOUD_IMAGE: rhel-7
|
||||||
|
CLOUD_REGION: europe-west1-b
|
||||||
|
CLUSTER_MODE: default
|
||||||
|
|
||||||
|
.centos7_flannel_variables: ¢os7_flannel_variables
|
||||||
|
# stage: deploy-gce-part2
|
||||||
|
KUBE_NETWORK_PLUGIN: flannel
|
||||||
|
CLOUD_IMAGE: centos-7
|
||||||
|
CLOUD_REGION: us-west1-a
|
||||||
|
CLUSTER_MODE: default
|
||||||
|
|
||||||
|
.debian8_calico_variables: &debian8_calico_variables
|
||||||
|
# stage: deploy-gce-part2
|
||||||
|
KUBE_NETWORK_PLUGIN: calico
|
||||||
|
CLOUD_IMAGE: debian-8-kubespray
|
||||||
|
CLOUD_REGION: us-central1-b
|
||||||
|
CLUSTER_MODE: default
|
||||||
|
|
||||||
|
.coreos_canal_variables: &coreos_canal_variables
|
||||||
|
# stage: deploy-gce-part2
|
||||||
|
KUBE_NETWORK_PLUGIN: canal
|
||||||
|
CLOUD_IMAGE: coreos-stable
|
||||||
|
CLOUD_REGION: us-east1-b
|
||||||
|
CLUSTER_MODE: default
|
||||||
|
BOOTSTRAP_OS: coreos
|
||||||
|
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||||
|
|
||||||
|
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||||
|
# stage: deploy-gce-special
|
||||||
|
KUBE_NETWORK_PLUGIN: canal
|
||||||
|
CLOUD_IMAGE: rhel-7
|
||||||
|
CLOUD_REGION: us-east1-b
|
||||||
|
CLUSTER_MODE: separated
|
||||||
|
|
||||||
|
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||||
|
# stage: deploy-gce-special
|
||||||
|
KUBE_NETWORK_PLUGIN: weave
|
||||||
|
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||||
|
CLOUD_REGION: us-central1-b
|
||||||
|
CLUSTER_MODE: separated
|
||||||
|
|
||||||
|
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
||||||
|
# stage: deploy-gce-special
|
||||||
|
KUBE_NETWORK_PLUGIN: calico
|
||||||
|
CLOUD_IMAGE: centos-7
|
||||||
|
CLOUD_REGION: europe-west1-b
|
||||||
|
CLUSTER_MODE: ha
|
||||||
|
|
||||||
|
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||||
|
# stage: deploy-gce-special
|
||||||
|
KUBE_NETWORK_PLUGIN: weave
|
||||||
|
CLOUD_IMAGE: coreos-alpha
|
||||||
|
CLOUD_REGION: us-west1-a
|
||||||
|
CLUSTER_MODE: ha
|
||||||
|
BOOTSTRAP_OS: coreos
|
||||||
|
|
||||||
|
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
KUBE_NETWORK_PLUGIN: flannel
|
||||||
|
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||||
|
CLOUD_REGION: us-central1-b
|
||||||
|
CLUSTER_MODE: separated
|
||||||
|
ETCD_DEPLOYMENT: rkt
|
||||||
|
KUBELET_DEPLOYMENT: rkt
|
||||||
|
|
||||||
|
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||||
|
coreos-calico-sep:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_calico_sep_variables
|
||||||
|
when: on_success
|
||||||
|
except: ['triggers']
|
||||||
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
|
coreos-calico-sep-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_calico_sep_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
centos7-flannel:
|
||||||
|
stage: deploy-gce-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos7_flannel_variables
|
||||||
|
when: on_success
|
||||||
|
except: ['triggers']
|
||||||
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
|
centos7-flannel-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos7_flannel_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
ubuntu-weave-sep:
|
||||||
|
stage: deploy-gce-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_weave_sep_variables
|
||||||
|
when: on_success
|
||||||
|
except: ['triggers']
|
||||||
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
|
ubuntu-weave-sep-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_weave_sep_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
# More builds for PRs/merges (manual) and triggers (auto)
|
||||||
|
debian8-canal-ha:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *debian8_canal_ha_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
debian8-canal-ha-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *debian8_canal_ha_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
rhel7-weave:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *rhel7_weave_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
rhel7-weave-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *rhel7_weave_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
debian8-calico:
|
||||||
|
stage: deploy-gce-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *debian8_calico_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
debian8-calico-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *debian8_calico_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
coreos-canal:
|
||||||
|
stage: deploy-gce-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_canal_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
coreos-canal-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_canal_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
rhel7-canal-sep:
|
||||||
|
stage: deploy-gce-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *rhel7_canal_sep_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/,]
|
||||||
|
|
||||||
|
rhel7-canal-sep-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *rhel7_canal_sep_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
centos7-calico-ha:
|
||||||
|
stage: deploy-gce-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos7_calico_ha_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
centos7-calico-ha-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos7_calico_ha_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
||||||
|
coreos-alpha-weave-ha:
|
||||||
|
stage: deploy-gce-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_alpha_weave_ha_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
ubuntu-rkt-sep:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_rkt_sep_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
# Premoderated with manual actions
|
||||||
|
syntax-check:
|
||||||
|
<<: *job
|
||||||
|
stage: unit-tests
|
||||||
|
before_script:
|
||||||
|
- apt-get -y install jq
|
||||||
|
script:
|
||||||
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
||||||
|
- /bin/sh scripts/premoderator.sh
|
||||||
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
|
tox-inventory-builder:
|
||||||
|
stage: unit-tests
|
||||||
|
<<: *job
|
||||||
|
script:
|
||||||
|
- pip install tox
|
||||||
|
- cd contrib/inventory_builder && tox
|
||||||
|
when: manual
|
||||||
|
except: ['triggers', 'master']
|
||||||
149
.travis.yml
149
.travis.yml
@@ -1,149 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
|
|
||||||
git:
|
|
||||||
depth: 5
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
GCE_USER=travis
|
|
||||||
SSH_USER=$GCE_USER
|
|
||||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
|
||||||
CONTAINER_ENGINE=docker
|
|
||||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
|
||||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
|
||||||
matrix:
|
|
||||||
# Debian Jessie
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=europe-west1-b
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=us-central1-c
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=us-east1-d
|
|
||||||
|
|
||||||
# Centos 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=centos-7-sudo
|
|
||||||
CLOUD_REGION=asia-east1-c
|
|
||||||
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=centos-7-sudo
|
|
||||||
CLOUD_REGION=europe-west1-b
|
|
||||||
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=centos-7-sudo
|
|
||||||
CLOUD_REGION=us-central1-c
|
|
||||||
|
|
||||||
# Redhat 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=rhel-7-sudo
|
|
||||||
CLOUD_REGION=us-east1-d
|
|
||||||
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=rhel-7-sudo
|
|
||||||
CLOUD_REGION=asia-east1-c
|
|
||||||
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=rhel-7-sudo
|
|
||||||
CLOUD_REGION=europe-west1-b
|
|
||||||
|
|
||||||
# Ubuntu 16.04
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION=us-central1-c
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION=us-east1-d
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION=asia-east1-c
|
|
||||||
|
|
||||||
# Ubuntu 15.10
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=ubuntu-1510-wily
|
|
||||||
CLOUD_REGION=europe-west1-b
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=ubuntu-1510-wily
|
|
||||||
CLOUD_REGION=us-central1-a
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=ubuntu-1510-wily
|
|
||||||
CLOUD_REGION=us-east1-d
|
|
||||||
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
# Install Ansible.
|
|
||||||
- pip install --user boto -U
|
|
||||||
- pip install --user ansible
|
|
||||||
- pip install --user netaddr
|
|
||||||
- pip install --user apache-libcloud
|
|
||||||
|
|
||||||
cache:
|
|
||||||
- directories:
|
|
||||||
- $HOME/.cache/pip
|
|
||||||
- $HOME/.local
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
|
||||||
- mkdir -p $HOME/.ssh
|
|
||||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
|
||||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
|
||||||
- chmod 400 $HOME/.ssh/id_rsa
|
|
||||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
|
||||||
- $HOME/.local/bin/ansible-playbook --version
|
|
||||||
- cp tests/ansible.cfg .
|
|
||||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
|
||||||
## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
|
|
||||||
# - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml
|
|
||||||
|
|
||||||
script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
|
|
||||||
# Create cluster
|
|
||||||
- "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
|
|
||||||
# Tests Cases
|
|
||||||
## Test Master API
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
|
||||||
## Create a POD
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
|
|
||||||
## Ping the between 2 pod
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
|
||||||
## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
|
|
||||||
# - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml
|
|
||||||
|
|
||||||
after_script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
161
.travis.yml.bak
Normal file
161
.travis.yml.bak
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
sudo: required
|
||||||
|
|
||||||
|
services:
|
||||||
|
- docker
|
||||||
|
|
||||||
|
git:
|
||||||
|
depth: 5
|
||||||
|
|
||||||
|
env:
|
||||||
|
global:
|
||||||
|
GCE_USER=travis
|
||||||
|
SSH_USER=$GCE_USER
|
||||||
|
TEST_ID=$TRAVIS_JOB_NUMBER
|
||||||
|
CONTAINER_ENGINE=docker
|
||||||
|
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
||||||
|
GS_ACCESS_KEY_ID=$GS_KEY
|
||||||
|
GS_SECRET_ACCESS_KEY=$GS_SECRET
|
||||||
|
ANSIBLE_KEEP_REMOTE_FILES=1
|
||||||
|
CLUSTER_MODE=default
|
||||||
|
BOOTSTRAP_OS=none
|
||||||
|
matrix:
|
||||||
|
# Debian Jessie
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=canal
|
||||||
|
CLOUD_IMAGE=debian-8-kubespray
|
||||||
|
CLOUD_REGION=asia-east1-a
|
||||||
|
CLUSTER_MODE=ha
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=calico
|
||||||
|
CLOUD_IMAGE=debian-8-kubespray
|
||||||
|
CLOUD_REGION=europe-west1-c
|
||||||
|
CLUSTER_MODE=default
|
||||||
|
|
||||||
|
# Centos 7
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=flannel
|
||||||
|
CLOUD_IMAGE=centos-7
|
||||||
|
CLOUD_REGION=asia-northeast1-c
|
||||||
|
CLUSTER_MODE=default
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=calico
|
||||||
|
CLOUD_IMAGE=centos-7
|
||||||
|
CLOUD_REGION=us-central1-b
|
||||||
|
CLUSTER_MODE=ha
|
||||||
|
|
||||||
|
# Redhat 7
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=weave
|
||||||
|
CLOUD_IMAGE=rhel-7
|
||||||
|
CLOUD_REGION=us-east1-c
|
||||||
|
CLUSTER_MODE=default
|
||||||
|
|
||||||
|
# CoreOS stable
|
||||||
|
#- >-
|
||||||
|
# KUBE_NETWORK_PLUGIN=weave
|
||||||
|
# CLOUD_IMAGE=coreos-stable
|
||||||
|
# CLOUD_REGION=europe-west1-b
|
||||||
|
# CLUSTER_MODE=ha
|
||||||
|
# BOOTSTRAP_OS=coreos
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=canal
|
||||||
|
CLOUD_IMAGE=coreos-stable
|
||||||
|
CLOUD_REGION=us-west1-b
|
||||||
|
CLUSTER_MODE=default
|
||||||
|
BOOTSTRAP_OS=coreos
|
||||||
|
|
||||||
|
# Extra cases for separated roles
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=canal
|
||||||
|
CLOUD_IMAGE=rhel-7
|
||||||
|
CLOUD_REGION=asia-northeast1-b
|
||||||
|
CLUSTER_MODE=separate
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=weave
|
||||||
|
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||||
|
CLOUD_REGION=europe-west1-d
|
||||||
|
CLUSTER_MODE=separate
|
||||||
|
- >-
|
||||||
|
KUBE_NETWORK_PLUGIN=calico
|
||||||
|
CLOUD_IMAGE=coreos-stable
|
||||||
|
CLOUD_REGION=us-central1-f
|
||||||
|
CLUSTER_MODE=separate
|
||||||
|
BOOTSTRAP_OS=coreos
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
# Install Ansible.
|
||||||
|
- pip install --user ansible
|
||||||
|
- pip install --user netaddr
|
||||||
|
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
|
||||||
|
- pip install --user apache-libcloud==0.20.1
|
||||||
|
- pip install --user boto==2.9.0 -U
|
||||||
|
# Load cached docker images
|
||||||
|
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
|
||||||
|
|
||||||
|
cache:
|
||||||
|
- directories:
|
||||||
|
- $HOME/.cache/pip
|
||||||
|
- $HOME/.local
|
||||||
|
- /var/tmp/releases
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
||||||
|
- mkdir -p $HOME/.ssh
|
||||||
|
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||||
|
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||||
|
- chmod 400 $HOME/.ssh/id_rsa
|
||||||
|
- chmod 755 $HOME/.local/bin/ansible-playbook
|
||||||
|
- $HOME/.local/bin/ansible-playbook --version
|
||||||
|
- cp tests/ansible.cfg .
|
||||||
|
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||||
|
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
||||||
|
|
||||||
|
script:
|
||||||
|
- >
|
||||||
|
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
||||||
|
-e mode=${CLUSTER_MODE}
|
||||||
|
-e test_id=${TEST_ID}
|
||||||
|
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
|
-e gce_pem_file=${HOME}/.ssh/gce
|
||||||
|
-e cloud_image=${CLOUD_IMAGE}
|
||||||
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
|
-e cloud_region=${CLOUD_REGION}
|
||||||
|
|
||||||
|
# Create cluster with netchecker app deployed
|
||||||
|
- >
|
||||||
|
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||||
|
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||||
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
|
-e download_run_once=true
|
||||||
|
-e download_localhost=true
|
||||||
|
-e local_release_dir=/var/tmp/releases
|
||||||
|
-e deploy_netchecker=true
|
||||||
|
cluster.yml
|
||||||
|
|
||||||
|
# Tests Cases
|
||||||
|
## Test Master API
|
||||||
|
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||||
|
## Ping the between 2 pod
|
||||||
|
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||||
|
## Advanced DNS checks
|
||||||
|
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||||
|
|
||||||
|
after_script:
|
||||||
|
- >
|
||||||
|
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||||
|
-e mode=${CLUSTER_MODE}
|
||||||
|
-e test_id=${TEST_ID}
|
||||||
|
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||||
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
|
-e gce_pem_file=${HOME}/.ssh/gce
|
||||||
|
-e cloud_image=${CLOUD_IMAGE}
|
||||||
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
|
-e cloud_region=${CLOUD_REGION}
|
||||||
3
OWNERS
3
OWNERS
@@ -4,3 +4,6 @@
|
|||||||
owners:
|
owners:
|
||||||
- Smana
|
- Smana
|
||||||
- ant31
|
- ant31
|
||||||
|
- bogdando
|
||||||
|
- mattymo
|
||||||
|
- rsmitty
|
||||||
|
|||||||
80
README.md
80
README.md
@@ -1,10 +1,10 @@
|
|||||||

|

|
||||||
|
|
||||||
##Deploy a production ready kubernetes cluster
|
##Deploy a production ready kubernetes cluster
|
||||||
|
|
||||||
If you have questions, you can [invite yourself](https://slack.kubespray.io/) to **chat** with us on Slack! [](https://kubespray.slack.com)
|
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
|
||||||
|
|
||||||
- Can be deployed on **AWS, GCE, OpenStack or Baremetal**
|
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
||||||
- **High available** cluster
|
- **High available** cluster
|
||||||
- **Composable** (Choice of the network plugin for instance)
|
- **Composable** (Choice of the network plugin for instance)
|
||||||
- Support most popular **Linux distributions**
|
- Support most popular **Linux distributions**
|
||||||
@@ -13,75 +13,89 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to
|
|||||||
|
|
||||||
To deploy the cluster you can use :
|
To deploy the cluster you can use :
|
||||||
|
|
||||||
[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
|
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
|
||||||
**Ansible** usual commands <br>
|
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
||||||
|
|
||||||
|
|
||||||
* [Requirements](#requirements)
|
* [Requirements](#requirements)
|
||||||
|
* [Kargo vs ...](docs/comparisons.md)
|
||||||
* [Getting started](docs/getting-started.md)
|
* [Getting started](docs/getting-started.md)
|
||||||
|
* [Ansible inventory and tags](docs/ansible.md)
|
||||||
|
* [Deployment data variables](docs/vars.md)
|
||||||
|
* [DNS stack](docs/dns-stack.md)
|
||||||
|
* [HA mode](docs/ha-mode.md)
|
||||||
|
* [Network plugins](#network-plugins)
|
||||||
* [Vagrant install](docs/vagrant.md)
|
* [Vagrant install](docs/vagrant.md)
|
||||||
* [CoreOS bootstrap](docs/coreos.md)
|
* [CoreOS bootstrap](docs/coreos.md)
|
||||||
* [Ansible variables](docs/ansible.md)
|
* [Downloaded artifacts](docs/downloads.md)
|
||||||
* [Cloud providers](docs/cloud.md)
|
* [Cloud providers](docs/cloud.md)
|
||||||
* [OpenStack](docs/openstack.md)
|
* [OpenStack](docs/openstack.md)
|
||||||
* [AWS](docs/aws.md)
|
* [AWS](docs/aws.md)
|
||||||
* [Network plugins](#network-plugins)
|
* [Azure](docs/azure.md)
|
||||||
|
* [Large deployments](docs/large-deployments.md)
|
||||||
|
* [Upgrades basics](docs/upgrades.md)
|
||||||
* [Roadmap](docs/roadmap.md)
|
* [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
Supported Linux distributions
|
Supported Linux distributions
|
||||||
===============
|
===============
|
||||||
|
|
||||||
* **CoreOS**
|
* **Container Linux by CoreOS**
|
||||||
* **Debian** Wheezy, Jessie
|
* **Debian** Jessie
|
||||||
* **Ubuntu** 14.10, 15.04, 15.10, 16.04
|
* **Ubuntu** 16.04
|
||||||
* **Fedora** 23
|
|
||||||
* **CentOS/RHEL** 7
|
* **CentOS/RHEL** 7
|
||||||
|
|
||||||
Versions
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
--------------
|
|
||||||
|
|
||||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.0 <br>
|
Versions of supported components
|
||||||
[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
|
--------------------------------
|
||||||
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
|
|
||||||
[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
|
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
|
||||||
|
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
|
||||||
|
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
|
||||||
|
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
|
||||||
|
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||||
[weave](http://weave.works/) v1.6.1 <br>
|
[weave](http://weave.works/) v1.6.1 <br>
|
||||||
[docker](https://www.docker.com/) v1.10.3 <br>
|
[docker](https://www.docker.com/) v1.12.5 <br>
|
||||||
|
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
|
||||||
|
|
||||||
|
Note: rkt support as docker alternative is limited to control plane (etcd and
|
||||||
|
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
||||||
|
plugins' related OS services. Also note, only one of the supported network
|
||||||
|
plugins can be deployed for a given single cluster.
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
* The target servers must have **access to the Internet** in order to pull docker images.
|
||||||
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||||
in order to avoid any issue during deployment you should disable your firewall
|
in order to avoid any issue during deployment you should disable your firewall.
|
||||||
|
* The target servers are configured to allow **IPv4 forwarding**.
|
||||||
* **Copy your ssh keys** to all the servers part of your inventory.
|
* **Copy your ssh keys** to all the servers part of your inventory.
|
||||||
* **Ansible v2.x and python-netaddr**
|
* **Ansible v2.2 (or newer) and python-netaddr**
|
||||||
|
|
||||||
|
|
||||||
## Network plugins
|
## Network plugins
|
||||||
You can choose between 3 network plugins. (default: `flannel` with vxlan backend)
|
You can choose between 4 network plugins. (default: `flannel` with vxlan backend)
|
||||||
|
|
||||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
||||||
|
|
||||||
|
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||||
|
|
||||||
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))
|
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||||
|
|
||||||
The choice is defined with the variable `kube_network_plugin`
|
|
||||||
|
|
||||||
|
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||||
|
option to leverage built-in cloud provider networking instead.
|
||||||
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||
[](https://travis-ci.org/kubespray/kargo) </br>
|

|
||||||
|
|
||||||
### Google Compute Engine
|
[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
|
||||||
|
|
||||||
| Calico | Flannel | Weave |
|
CI/end-to-end tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
|
||||||
------------- | ------------- | ------------- | ------------- |
|
See the [test matrix](docs/test_cases.md) for details.
|
||||||
Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
|
|
||||||
CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
|
|
||||||
CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|
|
|
||||||
|
|
||||||
CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
|
|
||||||
|
|||||||
17
Vagrantfile
vendored
17
Vagrantfile
vendored
@@ -16,7 +16,7 @@ $vm_cpus = 1
|
|||||||
$shared_folders = {}
|
$shared_folders = {}
|
||||||
$forwarded_ports = {}
|
$forwarded_ports = {}
|
||||||
$subnet = "172.17.8"
|
$subnet = "172.17.8"
|
||||||
$box = "bento/ubuntu-14.04"
|
$box = "bento/ubuntu-16.04"
|
||||||
|
|
||||||
host_vars = {}
|
host_vars = {}
|
||||||
|
|
||||||
@@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if Vagrant.has_plugin?("vagrant-proxyconf")
|
||||||
|
$no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
|
||||||
|
(1..$num_instances).each do |i|
|
||||||
|
$no_proxy += ",#{$subnet}.#{i+100}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
Vagrant.configure("2") do |config|
|
Vagrant.configure("2") do |config|
|
||||||
# always use Vagrants insecure key
|
# always use Vagrants insecure key
|
||||||
config.ssh.insert_key = false
|
config.ssh.insert_key = false
|
||||||
@@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
|
|||||||
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
|
||||||
config.vm.hostname = vm_name
|
config.vm.hostname = vm_name
|
||||||
|
|
||||||
|
if Vagrant.has_plugin?("vagrant-proxyconf")
|
||||||
|
config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
|
||||||
|
config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
|
||||||
|
config.proxy.no_proxy = $no_proxy
|
||||||
|
end
|
||||||
|
|
||||||
if $expose_docker_tcp
|
if $expose_docker_tcp
|
||||||
config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
|
config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
|
||||||
end
|
end
|
||||||
@@ -80,7 +93,7 @@ Vagrant.configure("2") do |config|
|
|||||||
"flannel_interface" => ip,
|
"flannel_interface" => ip,
|
||||||
"flannel_backend_type" => "host-gw",
|
"flannel_backend_type" => "host-gw",
|
||||||
"local_release_dir" => "/vagrant/temp",
|
"local_release_dir" => "/vagrant/temp",
|
||||||
"download_run_once" => "True"
|
"download_run_once" => "False"
|
||||||
}
|
}
|
||||||
config.vm.network :private_network, ip: ip
|
config.vm.network :private_network, ip: ip
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,9 @@
|
|||||||
[ssh_connection]
|
[ssh_connection]
|
||||||
pipelining=True
|
pipelining=True
|
||||||
[defaults]
|
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
||||||
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
|
[defaults]
|
||||||
host_key_checking=False
|
host_key_checking=False
|
||||||
|
gathering = smart
|
||||||
|
fact_caching = jsonfile
|
||||||
|
fact_caching_connection = /tmp
|
||||||
|
|||||||
45
cluster.yml
45
cluster.yml
@@ -1,36 +1,67 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- bastion-ssh-config
|
||||||
|
tags: [localhost, bastion]
|
||||||
|
|
||||||
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
|
any_errors_fatal: true
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
|
vars:
|
||||||
|
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
||||||
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||||
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- bootstrap-os
|
- bootstrap-os
|
||||||
tags:
|
tags:
|
||||||
- bootstrap-os
|
- bootstrap-os
|
||||||
|
|
||||||
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
- hosts: all
|
any_errors_fatal: true
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: true
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: etcd:!k8s-cluster
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
|
any_errors_fatal: true
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
|
- { role: docker, tags: docker }
|
||||||
|
- { role: rkt, tags: rkt, when: "'rkt' in [ etcd_deployment_type, kubelet_deployment_type ]" }
|
||||||
|
|
||||||
|
- hosts: etcd:!k8s-cluster
|
||||||
|
any_errors_fatal: true
|
||||||
|
roles:
|
||||||
- { role: etcd, tags: etcd }
|
- { role: etcd, tags: etcd }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: true
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
|
||||||
- { role: etcd, tags: etcd }
|
- { role: etcd, tags: etcd }
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
- { role: network_plugin, tags: network }
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
|
any_errors_fatal: true
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes/master, tags: master }
|
||||||
|
- { role: kubernetes-apps/lib, tags: apps }
|
||||||
|
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||||
|
|
||||||
|
- hosts: calico-rr
|
||||||
|
any_errors_fatal: true
|
||||||
|
roles:
|
||||||
|
- { role: network_plugin/calico/rr, tags: network }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: true
|
||||||
roles:
|
roles:
|
||||||
- { role: dnsmasq, tags: dnsmasq }
|
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||||
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
|
any_errors_fatal: true
|
||||||
roles:
|
roles:
|
||||||
|
- { role: kubernetes-apps/lib, tags: apps }
|
||||||
- { role: kubernetes-apps, tags: apps }
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
|||||||
2
contrib/azurerm/.gitignore
vendored
Normal file
2
contrib/azurerm/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
.generated
|
||||||
|
/inventory
|
||||||
64
contrib/azurerm/README.md
Normal file
64
contrib/azurerm/README.md
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# Kubernetes on Azure with Azure Resource Group Templates
|
||||||
|
|
||||||
|
Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates)
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
||||||
|
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
|
||||||
|
- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
|
||||||
|
- Dedicated Resource Group created in the Azure Portal or through azure-cli
|
||||||
|
|
||||||
|
## Configuration through group_vars/all
|
||||||
|
|
||||||
|
You have to modify at least one variable in group_vars/all, which is the **cluster_name** variable. It must be globally
|
||||||
|
unique due to some restrictions in Azure. Most other variables should be self explanatory if you have some basic Kubernetes
|
||||||
|
experience.
|
||||||
|
|
||||||
|
## Bastion host
|
||||||
|
|
||||||
|
You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
|
||||||
|
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
|
||||||
|
also removes all public IPs from all other VMs.
|
||||||
|
|
||||||
|
## Generating and applying
|
||||||
|
|
||||||
|
To generate and apply the templates, call:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ ./apply-rg.sh <resource_group_name>
|
||||||
|
```
|
||||||
|
|
||||||
|
If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
|
||||||
|
take care about creating/modifying whatever is needed.
|
||||||
|
|
||||||
|
## Clearing a resource group
|
||||||
|
|
||||||
|
If you need to delete all resources from a resource group, simply call:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ ./clear-rg.sh <resource_group_name>
|
||||||
|
```
|
||||||
|
|
||||||
|
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||||
|
|
||||||
|
|
||||||
|
## Generating an inventory for kargo
|
||||||
|
|
||||||
|
After you have applied the templates, you can generate an inventory with this call:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ ./generate-inventory.sh <resource_group_name>
|
||||||
|
```
|
||||||
|
|
||||||
|
It will create the file ./inventory which can then be used with kargo, e.g.:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ cd kargo-root-dir
|
||||||
|
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
19
contrib/azurerm/apply-rg.sh
Executable file
19
contrib/azurerm/apply-rg.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
14
contrib/azurerm/clear-rg.sh
Executable file
14
contrib/azurerm/clear-rg.sh
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||||
12
contrib/azurerm/generate-inventory.sh
Executable file
12
contrib/azurerm/generate-inventory.sh
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
5
contrib/azurerm/generate-inventory.yml
Normal file
5
contrib/azurerm/generate-inventory.yml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- generate-inventory
|
||||||
5
contrib/azurerm/generate-templates.yml
Normal file
5
contrib/azurerm/generate-templates.yml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- generate-templates
|
||||||
26
contrib/azurerm/group_vars/all
Normal file
26
contrib/azurerm/group_vars/all
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
|
||||||
|
# Due to some Azure limitations, this name must be globally unique
|
||||||
|
cluster_name: example
|
||||||
|
|
||||||
|
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
||||||
|
# node that can be used to access the masters and minions
|
||||||
|
use_bastion: false
|
||||||
|
|
||||||
|
number_of_k8s_masters: 3
|
||||||
|
number_of_k8s_nodes: 3
|
||||||
|
|
||||||
|
masters_vm_size: Standard_A2
|
||||||
|
masters_os_disk_size: 1000
|
||||||
|
|
||||||
|
minions_vm_size: Standard_A2
|
||||||
|
minions_os_disk_size: 1000
|
||||||
|
|
||||||
|
admin_username: devops
|
||||||
|
admin_password: changeme
|
||||||
|
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||||
|
|
||||||
|
# Azure CIDRs
|
||||||
|
azure_vnet_cidr: 10.0.0.0/8
|
||||||
|
azure_admin_cidr: 10.241.2.0/24
|
||||||
|
azure_masters_cidr: 10.0.4.0/24
|
||||||
|
azure_minions_cidr: 10.240.0.0/16
|
||||||
11
contrib/azurerm/roles/generate-inventory/tasks/main.yml
Normal file
11
contrib/azurerm/roles/generate-inventory/tasks/main.yml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Query Azure VMs
|
||||||
|
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
||||||
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
|
- name: Generate inventory
|
||||||
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
@@ -0,0 +1,33 @@
|
|||||||
|
|
||||||
|
{% for vm in vm_list %}
|
||||||
|
{% if not use_bastion or vm.name == 'bastion' %}
|
||||||
|
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
|
||||||
|
{% else %}
|
||||||
|
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
{% for vm in vm_list %}
|
||||||
|
{% if 'kube-master' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
{% for vm in vm_list %}
|
||||||
|
{% if 'etcd' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
{% for vm in vm_list %}
|
||||||
|
{% if 'kube-node' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
37
contrib/azurerm/roles/generate-templates/defaults/main.yml
Normal file
37
contrib/azurerm/roles/generate-templates/defaults/main.yml
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
apiVersion: "2015-06-15"
|
||||||
|
|
||||||
|
virtualNetworkName: "KubVNET"
|
||||||
|
|
||||||
|
subnetAdminName: "ad-subnet"
|
||||||
|
subnetMastersName: "master-subnet"
|
||||||
|
subnetMinionsName: "minion-subnet"
|
||||||
|
|
||||||
|
routeTableName: "routetable"
|
||||||
|
securityGroupName: "secgroup"
|
||||||
|
|
||||||
|
nameSuffix: "{{cluster_name}}"
|
||||||
|
|
||||||
|
availabilitySetMasters: "master-avs"
|
||||||
|
availabilitySetMinions: "minion-avs"
|
||||||
|
|
||||||
|
faultDomainCount: 3
|
||||||
|
updateDomainCount: 10
|
||||||
|
|
||||||
|
bastionVmSize: Standard_A0
|
||||||
|
bastionVMName: bastion
|
||||||
|
bastionIPAddressName: bastion-pubip
|
||||||
|
|
||||||
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
|
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
|
||||||
|
|
||||||
|
imageReference:
|
||||||
|
publisher: "OpenLogic"
|
||||||
|
offer: "CentOS"
|
||||||
|
sku: "7.2"
|
||||||
|
version: "latest"
|
||||||
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
|
storageAccountType: "Standard_LRS"
|
||||||
|
|
||||||
14
contrib/azurerm/roles/generate-templates/tasks/main.yml
Normal file
14
contrib/azurerm/roles/generate-templates/tasks/main.yml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
- set_fact:
|
||||||
|
base_dir: "{{playbook_dir}}/.generated/"
|
||||||
|
|
||||||
|
- file: path={{base_dir}} state=directory recurse=true
|
||||||
|
|
||||||
|
- template: src={{item}} dest="{{base_dir}}/{{item}}"
|
||||||
|
with_items:
|
||||||
|
- network.json
|
||||||
|
- storage.json
|
||||||
|
- availability-sets.json
|
||||||
|
- bastion.json
|
||||||
|
- masters.json
|
||||||
|
- minions.json
|
||||||
|
- clear-rg.json
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Compute/availabilitySets",
|
||||||
|
"name": "{{availabilitySetMasters}}",
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"PlatformFaultDomainCount": "{{faultDomainCount}}",
|
||||||
|
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Compute/availabilitySets",
|
||||||
|
"name": "{{availabilitySetMinions}}",
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"PlatformFaultDomainCount": "{{faultDomainCount}}",
|
||||||
|
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,99 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||||
|
"subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]"
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{% if use_bastion %}
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/publicIPAddresses",
|
||||||
|
"name": "{{bastionIPAddressName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"publicIPAllocationMethod": "Static"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/networkInterfaces",
|
||||||
|
"name": "{{bastionVMName}}-nic",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"ipConfigurations": [
|
||||||
|
{
|
||||||
|
"name": "BastionIpConfig",
|
||||||
|
"properties": {
|
||||||
|
"privateIPAllocationMethod": "Dynamic",
|
||||||
|
"publicIPAddress": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]"
|
||||||
|
},
|
||||||
|
"subnet": {
|
||||||
|
"id": "[variables('subnetAdminRef')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Compute/virtualMachines",
|
||||||
|
"name": "{{bastionVMName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]"
|
||||||
|
],
|
||||||
|
"tags": {
|
||||||
|
"roles": "bastion"
|
||||||
|
},
|
||||||
|
"properties": {
|
||||||
|
"hardwareProfile": {
|
||||||
|
"vmSize": "{{bastionVmSize}}"
|
||||||
|
},
|
||||||
|
"osProfile": {
|
||||||
|
"computerName": "{{bastionVMName}}",
|
||||||
|
"adminUsername": "{{admin_username}}",
|
||||||
|
"adminPassword": "{{admin_password}}",
|
||||||
|
"linuxConfiguration": {
|
||||||
|
"disablePasswordAuthentication": "true",
|
||||||
|
"ssh": {
|
||||||
|
"publicKeys": [
|
||||||
|
{
|
||||||
|
"path": "{{sshKeyPath}}",
|
||||||
|
"keyData": "{{ssh_public_key}}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"storageProfile": {
|
||||||
|
"imageReference": {{imageReferenceJson}},
|
||||||
|
"osDisk": {
|
||||||
|
"name": "osdisk",
|
||||||
|
"vhd": {
|
||||||
|
"uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]"
|
||||||
|
},
|
||||||
|
"caching": "ReadWrite",
|
||||||
|
"createOption": "FromImage"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"networkProfile": {
|
||||||
|
"networkInterfaces": [
|
||||||
|
{
|
||||||
|
"id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {},
|
||||||
|
"variables": {},
|
||||||
|
"resources": [],
|
||||||
|
"outputs": {}
|
||||||
|
}
|
||||||
196
contrib/azurerm/roles/generate-templates/templates/masters.json
Normal file
196
contrib/azurerm/roles/generate-templates/templates/masters.json
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
"lbDomainName": "{{nameSuffix}}-api",
|
||||||
|
"lbPublicIPAddressName": "kubernetes-api-pubip",
|
||||||
|
"lbPublicIPAddressType": "Static",
|
||||||
|
"lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]",
|
||||||
|
"lbName": "kubernetes-api",
|
||||||
|
"lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]",
|
||||||
|
|
||||||
|
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||||
|
"kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]"
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/publicIPAddresses",
|
||||||
|
"name": "[variables('lbPublicIPAddressName')]",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]",
|
||||||
|
"dnsSettings": {
|
||||||
|
"domainNameLabel": "[variables('lbDomainName')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"name": "[variables('lbName')]",
|
||||||
|
"type": "Microsoft.Network/loadBalancers",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"frontendIPConfigurations": [
|
||||||
|
{
|
||||||
|
"name": "kube-api-frontend",
|
||||||
|
"properties": {
|
||||||
|
"publicIPAddress": {
|
||||||
|
"id": "[variables('lbPublicIPAddressID')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"backendAddressPools": [
|
||||||
|
{
|
||||||
|
"name": "kube-api-backend"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"loadBalancingRules": [
|
||||||
|
{
|
||||||
|
"name": "kube-api",
|
||||||
|
"properties": {
|
||||||
|
"frontendIPConfiguration": {
|
||||||
|
"id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]"
|
||||||
|
},
|
||||||
|
"backendAddressPool": {
|
||||||
|
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||||
|
},
|
||||||
|
"protocol": "tcp",
|
||||||
|
"frontendPort": 443,
|
||||||
|
"backendPort": 443,
|
||||||
|
"enableFloatingIP": false,
|
||||||
|
"idleTimeoutInMinutes": 5,
|
||||||
|
"probe": {
|
||||||
|
"id": "[concat(variables('lbID'), '/probes/kube-api')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"probes": [
|
||||||
|
{
|
||||||
|
"name": "kube-api",
|
||||||
|
"properties": {
|
||||||
|
"protocol": "tcp",
|
||||||
|
"port": 443,
|
||||||
|
"intervalInSeconds": 5,
|
||||||
|
"numberOfProbes": 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{% for i in range(number_of_k8s_masters) %}
|
||||||
|
{% if not use_bastion %}
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/publicIPAddresses",
|
||||||
|
"name": "master-{{i}}-pubip",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"publicIPAllocationMethod": "Static"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{% endif %}
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/networkInterfaces",
|
||||||
|
"name": "master-{{i}}-nic",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
{% if not use_bastion %}
|
||||||
|
"[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]",
|
||||||
|
{% endif %}
|
||||||
|
"[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"ipConfigurations": [
|
||||||
|
{
|
||||||
|
"name": "MastersIpConfig",
|
||||||
|
"properties": {
|
||||||
|
"privateIPAllocationMethod": "Dynamic",
|
||||||
|
{% if not use_bastion %}
|
||||||
|
"publicIPAddress": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]"
|
||||||
|
},
|
||||||
|
{% endif %}
|
||||||
|
"subnet": {
|
||||||
|
"id": "[variables('kubeMastersSubnetRef')]"
|
||||||
|
},
|
||||||
|
"loadBalancerBackendAddressPools": [
|
||||||
|
{
|
||||||
|
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"networkSecurityGroup": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
|
||||||
|
},
|
||||||
|
"enableIPForwarding": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Compute/virtualMachines",
|
||||||
|
"name": "master-{{i}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
|
||||||
|
],
|
||||||
|
"tags": {
|
||||||
|
"roles": "kube-master,etcd"
|
||||||
|
},
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"properties": {
|
||||||
|
"availabilitySet": {
|
||||||
|
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]"
|
||||||
|
},
|
||||||
|
"hardwareProfile": {
|
||||||
|
"vmSize": "{{masters_vm_size}}"
|
||||||
|
},
|
||||||
|
"osProfile": {
|
||||||
|
"computerName": "master-{{i}}",
|
||||||
|
"adminUsername": "{{admin_username}}",
|
||||||
|
"adminPassword": "{{admin_password}}",
|
||||||
|
"linuxConfiguration": {
|
||||||
|
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||||
|
"ssh": {
|
||||||
|
"publicKeys": [
|
||||||
|
{
|
||||||
|
"path": "{{sshKeyPath}}",
|
||||||
|
"keyData": "{{ssh_public_key}}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"storageProfile": {
|
||||||
|
"imageReference": {{imageReferenceJson}},
|
||||||
|
"osDisk": {
|
||||||
|
"name": "ma{{nameSuffix}}{{i}}",
|
||||||
|
"vhd": {
|
||||||
|
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]"
|
||||||
|
},
|
||||||
|
"caching": "ReadWrite",
|
||||||
|
"createOption": "FromImage",
|
||||||
|
"diskSizeGB": "{{masters_os_disk_size}}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"networkProfile": {
|
||||||
|
"networkInterfaces": [
|
||||||
|
{
|
||||||
|
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} {% if not loop.last %},{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
]
|
||||||
|
}
|
||||||
113
contrib/azurerm/roles/generate-templates/templates/minions.json
Normal file
113
contrib/azurerm/roles/generate-templates/templates/minions.json
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
|
||||||
|
"kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]"
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{% for i in range(number_of_k8s_nodes) %}
|
||||||
|
{% if not use_bastion %}
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/publicIPAddresses",
|
||||||
|
"name": "minion-{{i}}-pubip",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"publicIPAllocationMethod": "Static"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{% endif %}
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/networkInterfaces",
|
||||||
|
"name": "minion-{{i}}-nic",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
{% if not use_bastion %}
|
||||||
|
"[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]"
|
||||||
|
{% endif %}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"ipConfigurations": [
|
||||||
|
{
|
||||||
|
"name": "MinionsIpConfig",
|
||||||
|
"properties": {
|
||||||
|
"privateIPAllocationMethod": "Dynamic",
|
||||||
|
{% if not use_bastion %}
|
||||||
|
"publicIPAddress": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]"
|
||||||
|
},
|
||||||
|
{% endif %}
|
||||||
|
"subnet": {
|
||||||
|
"id": "[variables('kubeMinionsSubnetRef')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"networkSecurityGroup": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
|
||||||
|
},
|
||||||
|
"enableIPForwarding": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Compute/virtualMachines",
|
||||||
|
"name": "minion-{{i}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
|
||||||
|
],
|
||||||
|
"tags": {
|
||||||
|
"roles": "kube-node"
|
||||||
|
},
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"properties": {
|
||||||
|
"availabilitySet": {
|
||||||
|
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]"
|
||||||
|
},
|
||||||
|
"hardwareProfile": {
|
||||||
|
"vmSize": "{{minions_vm_size}}"
|
||||||
|
},
|
||||||
|
"osProfile": {
|
||||||
|
"computerName": "minion-{{i}}",
|
||||||
|
"adminUsername": "{{admin_username}}",
|
||||||
|
"adminPassword": "{{admin_password}}",
|
||||||
|
"linuxConfiguration": {
|
||||||
|
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||||
|
"ssh": {
|
||||||
|
"publicKeys": [
|
||||||
|
{
|
||||||
|
"path": "{{sshKeyPath}}",
|
||||||
|
"keyData": "{{ssh_public_key}}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"storageProfile": {
|
||||||
|
"imageReference": {{imageReferenceJson}},
|
||||||
|
"osDisk": {
|
||||||
|
"name": "mi{{nameSuffix}}{{i}}",
|
||||||
|
"vhd": {
|
||||||
|
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]"
|
||||||
|
},
|
||||||
|
"caching": "ReadWrite",
|
||||||
|
"createOption": "FromImage",
|
||||||
|
"diskSizeGB": "{{minions_os_disk_size}}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"networkProfile": {
|
||||||
|
"networkInterfaces": [
|
||||||
|
{
|
||||||
|
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} {% if not loop.last %},{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
]
|
||||||
|
}
|
||||||
109
contrib/azurerm/roles/generate-templates/templates/network.json
Normal file
109
contrib/azurerm/roles/generate-templates/templates/network.json
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/routeTables",
|
||||||
|
"name": "{{routeTableName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"routes": [
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Network/virtualNetworks",
|
||||||
|
"name": "{{virtualNetworkName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"dependsOn": [
|
||||||
|
"[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"addressSpace": {
|
||||||
|
"addressPrefixes": [
|
||||||
|
"{{azure_vnet_cidr}}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"subnets": [
|
||||||
|
{
|
||||||
|
"name": "{{subnetMastersName}}",
|
||||||
|
"properties": {
|
||||||
|
"addressPrefix": "{{azure_masters_cidr}}",
|
||||||
|
"routeTable": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "{{subnetMinionsName}}",
|
||||||
|
"properties": {
|
||||||
|
"addressPrefix": "{{azure_minions_cidr}}",
|
||||||
|
"routeTable": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{% if use_bastion %}
|
||||||
|
,{
|
||||||
|
"name": "{{subnetAdminName}}",
|
||||||
|
"properties": {
|
||||||
|
"addressPrefix": "{{azure_admin_cidr}}",
|
||||||
|
"routeTable": {
|
||||||
|
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"type": "Microsoft.Network/networkSecurityGroups",
|
||||||
|
"name": "{{securityGroupName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"properties": {
|
||||||
|
"securityRules": [
|
||||||
|
{% if not use_bastion %}
|
||||||
|
{
|
||||||
|
"name": "ssh",
|
||||||
|
"properties": {
|
||||||
|
"description": "Allow SSH",
|
||||||
|
"protocol": "Tcp",
|
||||||
|
"sourcePortRange": "*",
|
||||||
|
"destinationPortRange": "22",
|
||||||
|
"sourceAddressPrefix": "Internet",
|
||||||
|
"destinationAddressPrefix": "*",
|
||||||
|
"access": "Allow",
|
||||||
|
"priority": 100,
|
||||||
|
"direction": "Inbound"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{% endif %}
|
||||||
|
{
|
||||||
|
"name": "kube-api",
|
||||||
|
"properties": {
|
||||||
|
"description": "Allow secure kube-api",
|
||||||
|
"protocol": "Tcp",
|
||||||
|
"sourcePortRange": "*",
|
||||||
|
"destinationPortRange": "443",
|
||||||
|
"sourceAddressPrefix": "Internet",
|
||||||
|
"destinationAddressPrefix": "*",
|
||||||
|
"access": "Allow",
|
||||||
|
"priority": 101,
|
||||||
|
"direction": "Inbound"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"resources": [],
|
||||||
|
"dependsOn": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Storage/storageAccounts",
|
||||||
|
"name": "{{storageAccountName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"properties": {
|
||||||
|
"accountType": "{{storageAccountType}}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
297
contrib/inventory_builder/inventory.py
Normal file
297
contrib/inventory_builder/inventory.py
Normal file
@@ -0,0 +1,297 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
# Usage: inventory.py ip1 [ip2 ...]
|
||||||
|
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
|
||||||
|
#
|
||||||
|
# Advanced usage:
|
||||||
|
# Add another host after initial creation: inventory.py 10.10.1.5
|
||||||
|
# Delete a host: inventory.py -10.10.1.3
|
||||||
|
# Delete a host by id: inventory.py -node1
|
||||||
|
#
|
||||||
|
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
|
||||||
|
# YAML file should be in the following format:
|
||||||
|
# group1:
|
||||||
|
# host1:
|
||||||
|
# ip: X.X.X.X
|
||||||
|
# var: val
|
||||||
|
# group2:
|
||||||
|
# host2:
|
||||||
|
# ip: X.X.X.X
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
try:
|
||||||
|
import configparser
|
||||||
|
except ImportError:
|
||||||
|
import ConfigParser as configparser
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
ROLES = ['kube-master', 'all', 'k8s-cluster:children', 'kube-node', 'etcd']
|
||||||
|
PROTECTED_NAMES = ROLES
|
||||||
|
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||||
|
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||||
|
'0': False, 'no': False, 'false': False, 'off': False}
|
||||||
|
|
||||||
|
|
||||||
|
def get_var_as_bool(name, default):
|
||||||
|
value = os.environ.get(name, '')
|
||||||
|
return _boolean_states.get(value.lower(), default)
|
||||||
|
|
||||||
|
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
||||||
|
DEBUG = get_var_as_bool("DEBUG", True)
|
||||||
|
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
||||||
|
|
||||||
|
|
||||||
|
class KargoInventory(object):
|
||||||
|
|
||||||
|
def __init__(self, changed_hosts=None, config_file=None):
|
||||||
|
self.config = configparser.ConfigParser(allow_no_value=True,
|
||||||
|
delimiters=('\t', ' '))
|
||||||
|
self.config_file = config_file
|
||||||
|
if self.config_file:
|
||||||
|
self.config.read(self.config_file)
|
||||||
|
|
||||||
|
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||||
|
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
self.ensure_required_groups(ROLES)
|
||||||
|
|
||||||
|
if changed_hosts:
|
||||||
|
self.hosts = self.build_hostnames(changed_hosts)
|
||||||
|
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||||
|
self.set_kube_master(list(self.hosts.keys())[:2])
|
||||||
|
self.set_all(self.hosts)
|
||||||
|
self.set_k8s_cluster()
|
||||||
|
self.set_kube_node(self.hosts.keys())
|
||||||
|
self.set_etcd(list(self.hosts.keys())[:3])
|
||||||
|
else: # Show help if no options
|
||||||
|
self.show_help()
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
self.write_config(self.config_file)
|
||||||
|
|
||||||
|
def write_config(self, config_file):
|
||||||
|
if config_file:
|
||||||
|
with open(config_file, 'w') as f:
|
||||||
|
self.config.write(f)
|
||||||
|
else:
|
||||||
|
print("WARNING: Unable to save config. Make sure you set "
|
||||||
|
"CONFIG_FILE env var.")
|
||||||
|
|
||||||
|
def debug(self, msg):
|
||||||
|
if DEBUG:
|
||||||
|
print("DEBUG: {0}".format(msg))
|
||||||
|
|
||||||
|
def get_ip_from_opts(self, optstring):
|
||||||
|
opts = optstring.split(' ')
|
||||||
|
for opt in opts:
|
||||||
|
if '=' not in opt:
|
||||||
|
continue
|
||||||
|
k, v = opt.split('=')
|
||||||
|
if k == "ip":
|
||||||
|
return v
|
||||||
|
raise ValueError("IP parameter not found in options")
|
||||||
|
|
||||||
|
def ensure_required_groups(self, groups):
|
||||||
|
for group in groups:
|
||||||
|
try:
|
||||||
|
self.debug("Adding group {0}".format(group))
|
||||||
|
self.config.add_section(group)
|
||||||
|
except configparser.DuplicateSectionError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_host_id(self, host):
|
||||||
|
'''Returns integer host ID (without padding) from a given hostname.'''
|
||||||
|
try:
|
||||||
|
short_hostname = host.split('.')[0]
|
||||||
|
return int(re.findall("\d+$", short_hostname)[-1])
|
||||||
|
except IndexError:
|
||||||
|
raise ValueError("Host name must end in an integer")
|
||||||
|
|
||||||
|
def build_hostnames(self, changed_hosts):
|
||||||
|
existing_hosts = OrderedDict()
|
||||||
|
highest_host_id = 0
|
||||||
|
try:
|
||||||
|
for host, opts in self.config.items('all'):
|
||||||
|
existing_hosts[host] = opts
|
||||||
|
host_id = self.get_host_id(host)
|
||||||
|
if host_id > highest_host_id:
|
||||||
|
highest_host_id = host_id
|
||||||
|
except configparser.NoSectionError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
||||||
|
next_host_id = highest_host_id + 1
|
||||||
|
|
||||||
|
all_hosts = existing_hosts.copy()
|
||||||
|
for host in changed_hosts:
|
||||||
|
if host[0] == "-":
|
||||||
|
realhost = host[1:]
|
||||||
|
if self.exists_hostname(all_hosts, realhost):
|
||||||
|
self.debug("Marked {0} for deletion.".format(realhost))
|
||||||
|
all_hosts.pop(realhost)
|
||||||
|
elif self.exists_ip(all_hosts, realhost):
|
||||||
|
self.debug("Marked {0} for deletion.".format(realhost))
|
||||||
|
self.delete_host_by_ip(all_hosts, realhost)
|
||||||
|
elif host[0].isdigit():
|
||||||
|
if self.exists_hostname(all_hosts, host):
|
||||||
|
self.debug("Skipping existing host {0}.".format(host))
|
||||||
|
continue
|
||||||
|
elif self.exists_ip(all_hosts, host):
|
||||||
|
self.debug("Skipping existing host {0}.".format(host))
|
||||||
|
continue
|
||||||
|
|
||||||
|
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||||
|
next_host_id += 1
|
||||||
|
all_hosts[next_host] = "ansible_host={0} ip={1}".format(
|
||||||
|
host, host)
|
||||||
|
elif host[0].isalpha():
|
||||||
|
raise Exception("Adding hosts by hostname is not supported.")
|
||||||
|
|
||||||
|
return all_hosts
|
||||||
|
|
||||||
|
def exists_hostname(self, existing_hosts, hostname):
|
||||||
|
return hostname in existing_hosts.keys()
|
||||||
|
|
||||||
|
def exists_ip(self, existing_hosts, ip):
|
||||||
|
for host_opts in existing_hosts.values():
|
||||||
|
if ip == self.get_ip_from_opts(host_opts):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def delete_host_by_ip(self, existing_hosts, ip):
|
||||||
|
for hostname, host_opts in existing_hosts.items():
|
||||||
|
if ip == self.get_ip_from_opts(host_opts):
|
||||||
|
del existing_hosts[hostname]
|
||||||
|
return
|
||||||
|
raise ValueError("Unable to find host by IP: {0}".format(ip))
|
||||||
|
|
||||||
|
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
||||||
|
for role in self.config.sections():
|
||||||
|
for host, _ in self.config.items(role):
|
||||||
|
if host not in hostnames and host not in protected_names:
|
||||||
|
self.debug("Host {0} removed from role {1}".format(host,
|
||||||
|
role))
|
||||||
|
self.config.remove_option(role, host)
|
||||||
|
|
||||||
|
def add_host_to_group(self, group, host, opts=""):
|
||||||
|
self.debug("adding host {0} to group {1}".format(host, group))
|
||||||
|
self.config.set(group, host, opts)
|
||||||
|
|
||||||
|
def set_kube_master(self, hosts):
|
||||||
|
for host in hosts:
|
||||||
|
self.add_host_to_group('kube-master', host)
|
||||||
|
|
||||||
|
def set_all(self, hosts):
|
||||||
|
for host, opts in hosts.items():
|
||||||
|
self.add_host_to_group('all', host, opts)
|
||||||
|
|
||||||
|
def set_k8s_cluster(self):
|
||||||
|
self.add_host_to_group('k8s-cluster:children', 'kube-node')
|
||||||
|
self.add_host_to_group('k8s-cluster:children', 'kube-master')
|
||||||
|
|
||||||
|
def set_kube_node(self, hosts):
|
||||||
|
for host in hosts:
|
||||||
|
self.add_host_to_group('kube-node', host)
|
||||||
|
|
||||||
|
def set_etcd(self, hosts):
|
||||||
|
for host in hosts:
|
||||||
|
self.add_host_to_group('etcd', host)
|
||||||
|
|
||||||
|
def load_file(self, files=None):
|
||||||
|
'''Directly loads JSON, or YAML file to inventory.'''
|
||||||
|
|
||||||
|
if not files:
|
||||||
|
raise Exception("No input file specified.")
|
||||||
|
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
for filename in list(files):
|
||||||
|
# Try JSON, then YAML
|
||||||
|
try:
|
||||||
|
with open(filename, 'r') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
except ValueError:
|
||||||
|
try:
|
||||||
|
with open(filename, 'r') as f:
|
||||||
|
data = yaml.load(f)
|
||||||
|
print("yaml")
|
||||||
|
except ValueError:
|
||||||
|
raise Exception("Cannot read %s as JSON, YAML, or CSV",
|
||||||
|
filename)
|
||||||
|
|
||||||
|
self.ensure_required_groups(ROLES)
|
||||||
|
self.set_k8s_cluster()
|
||||||
|
for group, hosts in data.items():
|
||||||
|
self.ensure_required_groups([group])
|
||||||
|
for host, opts in hosts.items():
|
||||||
|
optstring = "ansible_host={0} ip={0}".format(opts['ip'])
|
||||||
|
for key, val in opts.items():
|
||||||
|
if key == "ip":
|
||||||
|
continue
|
||||||
|
optstring += " {0}={1}".format(key, val)
|
||||||
|
|
||||||
|
self.add_host_to_group('all', host, optstring)
|
||||||
|
self.add_host_to_group(group, host)
|
||||||
|
self.write_config(self.config_file)
|
||||||
|
|
||||||
|
def parse_command(self, command, args=None):
|
||||||
|
if command == 'help':
|
||||||
|
self.show_help()
|
||||||
|
elif command == 'print_cfg':
|
||||||
|
self.print_config()
|
||||||
|
elif command == 'print_ips':
|
||||||
|
self.print_ips()
|
||||||
|
elif command == 'load':
|
||||||
|
self.load_file(args)
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid command specified.")
|
||||||
|
|
||||||
|
def show_help(self):
|
||||||
|
help_text = '''Usage: inventory.py ip1 [ip2 ...]
|
||||||
|
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
|
||||||
|
|
||||||
|
Available commands:
|
||||||
|
help - Display this message
|
||||||
|
print_cfg - Write inventory file to stdout
|
||||||
|
print_ips - Write a space-delimited list of IPs from "all" group
|
||||||
|
|
||||||
|
Advanced usage:
|
||||||
|
Add another host after initial creation: inventory.py 10.10.1.5
|
||||||
|
Delete a host: inventory.py -10.10.1.3
|
||||||
|
Delete a host by id: inventory.py -node1'''
|
||||||
|
print(help_text)
|
||||||
|
|
||||||
|
def print_config(self):
|
||||||
|
self.config.write(sys.stdout)
|
||||||
|
|
||||||
|
def print_ips(self):
|
||||||
|
ips = []
|
||||||
|
for host, opts in self.config.items('all'):
|
||||||
|
ips.append(self.get_ip_from_opts(opts))
|
||||||
|
print(' '.join(ips))
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=None):
|
||||||
|
if not argv:
|
||||||
|
argv = sys.argv[1:]
|
||||||
|
KargoInventory(argv, CONFIG_FILE)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
||||||
1
contrib/inventory_builder/requirements.txt
Normal file
1
contrib/inventory_builder/requirements.txt
Normal file
@@ -0,0 +1 @@
|
|||||||
|
configparser>=3.3.0
|
||||||
3
contrib/inventory_builder/setup.cfg
Normal file
3
contrib/inventory_builder/setup.cfg
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
[metadata]
|
||||||
|
name = kargo-inventory-builder
|
||||||
|
version = 0.1
|
||||||
29
contrib/inventory_builder/setup.py
Normal file
29
contrib/inventory_builder/setup.py
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
|
||||||
|
import setuptools
|
||||||
|
|
||||||
|
# In python < 2.7.4, a lazy loading of package `pbr` will break
|
||||||
|
# setuptools if some other modules registered functions in `atexit`.
|
||||||
|
# solution from: http://bugs.python.org/issue15881#msg170215
|
||||||
|
try:
|
||||||
|
import multiprocessing # noqa
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
setuptools.setup(
|
||||||
|
setup_requires=[],
|
||||||
|
pbr=False)
|
||||||
3
contrib/inventory_builder/test-requirements.txt
Normal file
3
contrib/inventory_builder/test-requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
hacking>=0.10.2
|
||||||
|
pytest>=2.8.0
|
||||||
|
mock>=1.3.0
|
||||||
212
contrib/inventory_builder/tests/test_inventory.py
Normal file
212
contrib/inventory_builder/tests/test_inventory.py
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import mock
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
import sys
|
||||||
|
|
||||||
|
path = "./contrib/inventory_builder/"
|
||||||
|
if path not in sys.path:
|
||||||
|
sys.path.append(path)
|
||||||
|
|
||||||
|
import inventory
|
||||||
|
|
||||||
|
|
||||||
|
class TestInventory(unittest.TestCase):
|
||||||
|
@mock.patch('inventory.sys')
|
||||||
|
def setUp(self, sys_mock):
|
||||||
|
sys_mock.exit = mock.Mock()
|
||||||
|
super(TestInventory, self).setUp()
|
||||||
|
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
||||||
|
self.inv = inventory.KargoInventory()
|
||||||
|
|
||||||
|
def test_get_ip_from_opts(self):
|
||||||
|
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||||
|
expected = "10.90.3.2"
|
||||||
|
result = self.inv.get_ip_from_opts(optstring)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_get_ip_from_opts_invalid(self):
|
||||||
|
optstring = "notanaddr=value something random!chars:D"
|
||||||
|
self.assertRaisesRegexp(ValueError, "IP parameter not found",
|
||||||
|
self.inv.get_ip_from_opts, optstring)
|
||||||
|
|
||||||
|
def test_ensure_required_groups(self):
|
||||||
|
groups = ['group1', 'group2']
|
||||||
|
self.inv.ensure_required_groups(groups)
|
||||||
|
for group in groups:
|
||||||
|
self.assertTrue(group in self.inv.config.sections())
|
||||||
|
|
||||||
|
def test_get_host_id(self):
|
||||||
|
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
|
||||||
|
'node3.xyz123.aaa']
|
||||||
|
expected = [99, 1, 1, 1, 3]
|
||||||
|
for hostname, expected in zip(hostnames, expected):
|
||||||
|
result = self.inv.get_host_id(hostname)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_get_host_id_invalid(self):
|
||||||
|
bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
|
||||||
|
for hostname in bad_hostnames:
|
||||||
|
self.assertRaisesRegexp(ValueError, "Host name must end in an",
|
||||||
|
self.inv.get_host_id, hostname)
|
||||||
|
|
||||||
|
def test_build_hostnames_add_one(self):
|
||||||
|
changed_hosts = ['10.90.0.2']
|
||||||
|
expected = OrderedDict([('node1',
|
||||||
|
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||||
|
result = self.inv.build_hostnames(changed_hosts)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_build_hostnames_add_duplicate(self):
|
||||||
|
changed_hosts = ['10.90.0.2']
|
||||||
|
expected = OrderedDict([('node1',
|
||||||
|
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||||
|
self.inv.config['all'] = expected
|
||||||
|
result = self.inv.build_hostnames(changed_hosts)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_build_hostnames_add_two(self):
|
||||||
|
changed_hosts = ['10.90.0.2', '10.90.0.3']
|
||||||
|
expected = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
self.inv.config['all'] = OrderedDict()
|
||||||
|
result = self.inv.build_hostnames(changed_hosts)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_build_hostnames_delete_first(self):
|
||||||
|
changed_hosts = ['-10.90.0.2']
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
self.inv.config['all'] = existing_hosts
|
||||||
|
expected = OrderedDict([
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
result = self.inv.build_hostnames(changed_hosts)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_exists_hostname_positive(self):
|
||||||
|
hostname = 'node1'
|
||||||
|
expected = True
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_exists_hostname_negative(self):
|
||||||
|
hostname = 'node99'
|
||||||
|
expected = False
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_exists_ip_positive(self):
|
||||||
|
ip = '10.90.0.2'
|
||||||
|
expected = True
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
result = self.inv.exists_ip(existing_hosts, ip)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_exists_ip_negative(self):
|
||||||
|
ip = '10.90.0.200'
|
||||||
|
expected = False
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
result = self.inv.exists_ip(existing_hosts, ip)
|
||||||
|
self.assertEqual(expected, result)
|
||||||
|
|
||||||
|
def test_delete_host_by_ip_positive(self):
|
||||||
|
ip = '10.90.0.2'
|
||||||
|
expected = OrderedDict([
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
self.inv.delete_host_by_ip(existing_hosts, ip)
|
||||||
|
self.assertEqual(expected, existing_hosts)
|
||||||
|
|
||||||
|
def test_delete_host_by_ip_negative(self):
|
||||||
|
ip = '10.90.0.200'
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||||
|
self.assertRaisesRegexp(ValueError, "Unable to find host",
|
||||||
|
self.inv.delete_host_by_ip, existing_hosts, ip)
|
||||||
|
|
||||||
|
def test_purge_invalid_hosts(self):
|
||||||
|
proper_hostnames = ['node1', 'node2']
|
||||||
|
bad_host = 'doesnotbelong2'
|
||||||
|
existing_hosts = OrderedDict([
|
||||||
|
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||||
|
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3'),
|
||||||
|
('doesnotbelong2', 'whateveropts=ilike')])
|
||||||
|
self.inv.config['all'] = existing_hosts
|
||||||
|
self.inv.purge_invalid_hosts(proper_hostnames)
|
||||||
|
self.assertTrue(bad_host not in self.inv.config['all'].keys())
|
||||||
|
|
||||||
|
def test_add_host_to_group(self):
|
||||||
|
group = 'etcd'
|
||||||
|
host = 'node1'
|
||||||
|
opts = 'ip=10.90.0.2'
|
||||||
|
|
||||||
|
self.inv.add_host_to_group(group, host, opts)
|
||||||
|
self.assertEqual(self.inv.config[group].get(host), opts)
|
||||||
|
|
||||||
|
def test_set_kube_master(self):
|
||||||
|
group = 'kube-master'
|
||||||
|
host = 'node1'
|
||||||
|
|
||||||
|
self.inv.set_kube_master([host])
|
||||||
|
self.assertTrue(host in self.inv.config[group])
|
||||||
|
|
||||||
|
def test_set_all(self):
|
||||||
|
group = 'all'
|
||||||
|
hosts = OrderedDict([
|
||||||
|
('node1', 'opt1'),
|
||||||
|
('node2', 'opt2')])
|
||||||
|
|
||||||
|
self.inv.set_all(hosts)
|
||||||
|
for host, opt in hosts.items():
|
||||||
|
self.assertEqual(self.inv.config[group].get(host), opt)
|
||||||
|
|
||||||
|
def test_set_k8s_cluster(self):
|
||||||
|
group = 'k8s-cluster:children'
|
||||||
|
expected_hosts = ['kube-node', 'kube-master']
|
||||||
|
|
||||||
|
self.inv.set_k8s_cluster()
|
||||||
|
for host in expected_hosts:
|
||||||
|
self.assertTrue(host in self.inv.config[group])
|
||||||
|
|
||||||
|
def test_set_kube_node(self):
|
||||||
|
group = 'kube-node'
|
||||||
|
host = 'node1'
|
||||||
|
|
||||||
|
self.inv.set_kube_node([host])
|
||||||
|
self.assertTrue(host in self.inv.config[group])
|
||||||
|
|
||||||
|
def test_set_etcd(self):
|
||||||
|
group = 'etcd'
|
||||||
|
host = 'node1'
|
||||||
|
|
||||||
|
self.inv.set_etcd([host])
|
||||||
|
self.assertTrue(host in self.inv.config[group])
|
||||||
28
contrib/inventory_builder/tox.ini
Normal file
28
contrib/inventory_builder/tox.ini
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
[tox]
|
||||||
|
minversion = 1.6
|
||||||
|
skipsdist = True
|
||||||
|
envlist = pep8, py27
|
||||||
|
|
||||||
|
[testenv]
|
||||||
|
whitelist_externals = py.test
|
||||||
|
usedevelop = True
|
||||||
|
deps =
|
||||||
|
-r{toxinidir}/requirements.txt
|
||||||
|
-r{toxinidir}/test-requirements.txt
|
||||||
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
|
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||||
|
commands = py.test -vv #{posargs:./tests}
|
||||||
|
|
||||||
|
[testenv:pep8]
|
||||||
|
usedevelop = False
|
||||||
|
whitelist_externals = bash
|
||||||
|
commands =
|
||||||
|
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||||
|
|
||||||
|
[testenv:venv]
|
||||||
|
commands = {posargs}
|
||||||
|
|
||||||
|
[flake8]
|
||||||
|
show-source = true
|
||||||
|
builtins = _
|
||||||
|
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg
|
||||||
92
contrib/network-storage/glusterfs/README.md
Normal file
92
contrib/network-storage/glusterfs/README.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
# Deploying a Kargo Kubernetes Cluster with GlusterFS
|
||||||
|
|
||||||
|
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
|
||||||
|
|
||||||
|
## Using an Ansible inventory
|
||||||
|
|
||||||
|
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
||||||
|
|
||||||
|
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):
|
||||||
|
|
||||||
|
```
|
||||||
|
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
||||||
|
|
||||||
|
```
|
||||||
|
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
||||||
|
|
||||||
|
```
|
||||||
|
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
|
||||||
|
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
|
||||||
|
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using Terraform and Ansible

The first step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent and set up environment variables for terraform:

```
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
export TF_VAR_username=${OS_USERNAME} && \
export TF_VAR_password=${OS_PASSWORD} && \
export TF_VAR_tenant=${OS_TENANT_NAME} && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, from the kargo directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to set up (for instance, the type of machine for bootstrapping).
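For instance (a sketch; the right value depends on the images you chose), the bootstrap option in that file might be set as follows — the valid values for `bootstrap_os` (`ubuntu`, `coreos`, `none`) appear in the group_vars diff further below:

```
# contrib/terraform/openstack/group_vars/all.yml (excerpt)
# Valid bootstrap options (required): ubuntu, coreos, none
bootstrap_os: ubuntu
```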
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:

```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the GlusterFS nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:

```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```
17
contrib/network-storage/glusterfs/glusterfs.yml
Normal file
@@ -0,0 +1,17 @@
---
- hosts: all
  gather_facts: true

- hosts: gfs-cluster
  roles:
    - { role: glusterfs/server }

- hosts: k8s-cluster
  roles:
    - { role: glusterfs/client }

- hosts: kube-master[0]
  roles:
    - { role: kubernetes-pv/lib }
    - { role: kubernetes-pv }
44
contrib/network-storage/glusterfs/inventory.example
Normal file
@@ -0,0 +1,44 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
#
# ## GlusterFS nodes
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube-master]
# node1
# node2

# [etcd]
# node1
# node2
# node3

# [kube-node]
# node2
# node3
# node4
# node5
# node6

# [k8s-cluster:children]
# kube-node
# kube-master

# [gfs-cluster]
# gfs_node1
# gfs_node2
# gfs_node3

# [network-storage:children]
# gfs-cluster
44
contrib/network-storage/glusterfs/roles/glusterfs/README.md
Normal file
@@ -0,0 +1,44 @@
# Ansible Role: GlusterFS

[Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)

Installs and configures GlusterFS on Linux.

## Requirements

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
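For orientation, a sketch of such a play, patterned on the `glusterfs/server` role added later in this change (volume name, brick path and group name are illustrative):

    - hosts: gfs-cluster
      become: true
      tasks:
        - name: Configure a replicated Gluster volume across the gfs-cluster hosts.
          gluster_volume:
            state: present
            name: gluster
            brick: /mnt/xfs-drive-gluster/brick
            replicas: "{{ groups['gfs-cluster'] | length }}"
            cluster: "{{ groups['gfs-cluster'] | join(',') }}"
            host: "{{ inventory_hostname }}"
            force: yes
          run_once: true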
## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

    glusterfs_default_release: ""

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

    glusterfs_ppa_use: yes
    glusterfs_ppa_version: "3.5"

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.

## Dependencies

None.

## Example Playbook

    - hosts: server
      roles:
        - geerlingguy.glusterfs

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

## License

MIT / BSD

## Author Information

This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -0,0 +1,11 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.8"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,16 @@
---
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- include: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -0,0 +1,24 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS client is installed.
  apt:
    name: "{{ item }}"
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-client
@@ -0,0 +1,10 @@
---
- name: Install Prerequisites
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-client
@@ -0,0 +1,13 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.8"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
disk_volume_device_1: /dev/vdb
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,82 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: install xfs Debian
  apt: name=xfsprogs state=present
  when: ansible_os_family == "Debian"

- name: install xfs RedHat
  yum: name=xfsprogs state=present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"

# Mount external volumes
- name: mounting new xfs filesystem
  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"

# Setup/install tasks.
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- include: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service: "name={{ glusterfs_daemon }} state=started enabled=yes"

- name: Ensure Gluster brick and mount directories exist.
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume.
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: yes
  run_once: true

- name: Mount glusterfs to retrieve disk size
  mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup: filter=ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
  set_fact:
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Create file on GlusterFS
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
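To make the disk-size computation above concrete (numbers purely illustrative): if the brick volume mounted at `gluster_mount_dir` reports `size_total = 53687091200` bytes in `ansible_mounts`, then `53687091200 / (1024*1024*1024) = 50`, so `gluster_disk_size_gb` is set to `"50"` and later becomes the capacity of the PersistentVolume template further below.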
@@ -0,0 +1,26 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-server
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS is installed.
  apt:
    name: "{{ item }}"
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1,11 @@
---
- name: Install Prerequisites
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1 @@
test file
@@ -0,0 +1,5 @@
---
- hosts: all

  roles:
    - role_under_test
@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterfs-server
@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd
@@ -0,0 +1,19 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
  register: gluster_pv
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
    name: glusterfs
    namespace: default
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.dest}}"
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
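Once these two tasks have run on the first master, the created objects can be checked with kubectl (a usage sketch; the resource names come from the templates below):

```
kubectl get endpoints glusterfs
kubectl get pv glusterfs
```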
@@ -0,0 +1,24 @@
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "subsets": [
    {% for host in groups['gfs-cluster'] %}
    {
      "addresses": [
        {
          "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    }{%- if not loop.last %}, {% endif -%}
    {% endfor %}
  ]
}
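For illustration, with two GlusterFS hosts whose `ip` variables are 10.3.0.7 and 10.3.0.8 (hypothetical addresses), the rendered endpoints object would look roughly like:

```
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": { "name": "glusterfs" },
  "subsets": [
    { "addresses": [ { "ip": "10.3.0.7" } ], "ports": [ { "port": 1 } ] },
    { "addresses": [ { "ip": "10.3.0.8" } ], "ports": [ { "port": 1 } ] }
  ]
}
```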
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
spec:
  capacity:
    storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs
    path: gluster
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
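Note that this change only lays down the Endpoints and the PersistentVolume; a claim that binds to it is left to the user. A minimal sketch of such a claim (not part of this change; name and requested size are illustrative) could be:

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-claim
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
```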
1
contrib/network-storage/glusterfs/roles/kubernetes-pv/lib
Symbolic link
@@ -0,0 +1 @@
../../../../../roles/kubernetes-apps/lib
@@ -0,0 +1,2 @@
dependencies:
  - {role: kubernetes-pv/ansible, tags: apps}
@@ -32,6 +32,6 @@ resource "null_resource" "ansible-provision" {
   }

   provisioner "local-exec" {
-    command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\netcd\" >> inventory"
+    command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
   }
 }
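For reference, the provisioner now appends the following group to the generated inventory (etcd is no longer listed as a child here):

```
[k8s-cluster:children]
kube-node
kube-master
```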
@@ -5,14 +5,13 @@ Openstack.

## Status

This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on an
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
services.

There are some assumptions made to try and ensure it will work on your openstack cluster.

* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least 1 floating ip, which we suggest is used on a master.
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled
@@ -24,29 +23,27 @@ There are some assumptions made to try and ensure it will work on your openstack cluster.

## Terraform

Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
requirements.

### Prep

#### OpenStack

Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:

```
$ source ~/.stackrc
```

You will need two networks before installing, an internal network and
an external (floating IP Pool) network. The internal network can be shared as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs the Terraform does not attempt to create
these for you.

By default Terraform will expect that your networks are called `internal` and
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.

A full list of variables you can change can be found at [variables.tf](variables.tf).
@@ -76,8 +73,36 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```

If you want to provision master or node VMs that don't use floating ips, write a `my-terraform-vars.tfvars` file, for example:

```
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "0"
```
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.

Additionally, the terraform based installation now supports provisioning of a GlusterFS shared file system, based on a separate set of Debian or RedHat based VMs. To enable this, add the following variables to your `my-terraform-vars.tfvars`:

```
# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
# This is the name of an image already available in your openstack installation.
image_gfs = "Ubuntu 15.10"
number_of_gfs_nodes_no_floating_ip = "3"
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
gfs_volume_size_in_gb = "50"
# The user needed for the image chosen for GlusterFS.
ssh_user_gfs = "ubuntu"
```

If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs need to be either Debian or RedHat based: Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available in hyperkube v1.4.3_coreos.0 or higher.


# Provision a Kubernetes Cluster on OpenStack

If not using a tfvars file for your setup, then execute:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
openstack_compute_secgroup_v2.k8s_master: Creating...
@@ -96,24 +121,33 @@ use the `terraform show` command.
State path: contrib/terraform/openstack/terraform.tfstate
```

Alternatively, if you wrote your terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
```

If you choose to add masters or nodes without floating ips (only internal ips on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/group_vars/k8s-cluster.yml` with an ssh command for ansible to be able to access your machines, tunneling through the first floating ip used. If you want to handle the ssh tunneling to these machines manually, please delete or move that file. If you want to use it, just leave it there, as ansible will pick it up automatically.
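As an illustration (user and address are hypothetical), the generated file is just the bastion template added in this change with the ssh user and the first master's floating ip substituted in:

```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q ubuntu@173.247.105.12"'
```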
Make sure you can connect to the hosts:

```
$ ansible -i contrib/terraform/openstack/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-etcd-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

If you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.

If it fails, try to connect manually via SSH... it could be something as simple as a stale host key.

Deploy kubernetes:
1
contrib/terraform/openstack/ansible_bastion_template.txt
Normal file
@@ -0,0 +1 @@
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
@@ -1,11 +1,16 @@
|
|||||||
|
# Valid bootstrap options (required): ubuntu, coreos, none
|
||||||
|
bootstrap_os: none
|
||||||
|
|
||||||
# Directory where the binaries will be installed
|
# Directory where the binaries will be installed
|
||||||
bin_dir: /usr/local/bin
|
bin_dir: /usr/local/bin
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
# Where the binaries will be downloaded.
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
# Note: ensure that you've enough disk space (about 1G)
|
||||||
local_release_dir: "/tmp/releases"
|
local_release_dir: "/tmp/releases"
|
||||||
|
# Random shifts for retrying failed ops like pushing/downloading
|
||||||
|
retry_stagger: 5
|
||||||
|
|
||||||
# Uncomment this line for CoreOS only.
|
# Uncomment this line for Container Linux by CoreOS only.
|
||||||
# Directory where python binary is installed
|
# Directory where python binary is installed
|
||||||
# ansible_python_interpreter: "/opt/bin/python"
|
# ansible_python_interpreter: "/opt/bin/python"
|
||||||
|
|
||||||
@@ -28,6 +33,10 @@ kube_users:
|
|||||||
|
|
||||||
# Kubernetes cluster name, also will be used as DNS domain
|
# Kubernetes cluster name, also will be used as DNS domain
|
||||||
cluster_name: cluster.local
|
cluster_name: cluster.local
|
||||||
|
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
|
||||||
|
ndots: 5
|
||||||
|
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||||
|
deploy_netchecker: false
|
||||||
|
|
||||||
# For some environments, each node has a pubilcally accessible
|
# For some environments, each node has a pubilcally accessible
|
||||||
# address and an address it should bind services to. These are
|
# address and an address it should bind services to. These are
|
||||||
@@ -51,6 +60,16 @@ cluster_name: cluster.local
|
|||||||
# but don't know about that address themselves.
|
# but don't know about that address themselves.
|
||||||
# access_ip: 1.1.1.1
|
# access_ip: 1.1.1.1
|
||||||
|
|
||||||
|
# Etcd access modes:
|
||||||
|
# Enable multiaccess to configure clients to access all of the etcd members directly
|
||||||
|
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
|
||||||
|
# This may be the case if clients support and loadbalance multiple etcd servers natively.
|
||||||
|
etcd_multiaccess: true
|
||||||
|
|
||||||
|
# Assume there are no internal loadbalancers for apiservers exist and listen on
|
||||||
|
# kube_apiserver_port (default 443)
|
||||||
|
loadbalancer_apiserver_localhost: true
|
||||||
|
|
||||||
# Choose network plugin (calico, weave or flannel)
|
# Choose network plugin (calico, weave or flannel)
|
||||||
kube_network_plugin: flannel
|
kube_network_plugin: flannel
|
||||||
|
|
||||||
@@ -86,51 +105,61 @@ kube_apiserver_insecure_port: 8080 # (http)
|
|||||||
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
||||||
# as it greatly simplifies configuration of your applications - you can use
|
# as it greatly simplifies configuration of your applications - you can use
|
||||||
# service names instead of magic environment variables.
|
# service names instead of magic environment variables.
|
||||||
# You still must manually configure all your containers to use this DNS server,
|
|
||||||
# Kubernetes won't do this for you (yet).
|
|
||||||
|
|
||||||
# Upstream dns servers used by dnsmasq
|
# Can be dnsmasq_kubedns, kubedns or none
|
||||||
upstream_dns_servers:
|
dns_mode: dnsmasq_kubedns
|
||||||
- 8.8.8.8
|
|
||||||
- 8.8.4.4
|
# Can be docker_dns, host_resolvconf or none
|
||||||
#
|
resolvconf_mode: docker_dns
|
||||||
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
|
||||||
dns_setup: true
|
## Upstream dns servers used by dnsmasq
|
||||||
|
#upstream_dns_servers:
|
||||||
|
# - 8.8.8.8
|
||||||
|
# - 8.8.4.4
|
||||||
|
|
||||||
dns_domain: "{{ cluster_name }}"
|
dns_domain: "{{ cluster_name }}"
|
||||||
#
|
|
||||||
# # Ip address of the kubernetes skydns service
|
# Ip address of the kubernetes skydns service
|
||||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
||||||
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
||||||
|
|
||||||
# There are some changes specific to the cloud providers
|
# There are some changes specific to the cloud providers
|
||||||
# for instance we need to encapsulate packets with some network plugins
|
# for instance we need to encapsulate packets with some network plugins
|
||||||
# If set the possible values are either 'gce', 'aws' or 'openstack'
|
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
|
||||||
# When openstack is used make sure to source in the openstack credentials
|
# When openstack is used make sure to source in the openstack credentials
|
||||||
# like you would do when using nova-client before starting the playbook.
|
# like you would do when using nova-client before starting the playbook.
|
||||||
|
# When azure is used, you need to also set the following variables.
|
||||||
# cloud_provider:
|
# cloud_provider:
|
||||||
|
|
||||||
# For multi masters architecture:
|
# see docs/azure.md for details on how to get these values
|
||||||
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
|
#azure_tenant_id:
|
||||||
# This domain name will be inserted into the /etc/hosts file of all servers
|
#azure_subscription_id:
|
||||||
# configuration example with haproxy :
|
#azure_aad_client_id:
|
||||||
# listen kubernetes-apiserver-https
|
#azure_aad_client_secret:
|
||||||
# bind 10.99.0.21:8383
|
#azure_resource_group:
|
||||||
# option ssl-hello-chk
|
#azure_location:
|
||||||
# mode tcp
|
#azure_subnet_name:
|
||||||
# timeout client 3h
|
#azure_security_group_name:
|
||||||
# timeout server 3h
|
#azure_vnet_name:
|
||||||
# server master1 10.99.0.26:443
|
|
||||||
# server master2 10.99.0.27:443
|
|
||||||
# balance roundrobin
|
|
||||||
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
|
||||||
|
|
||||||
## Set these proxy values in order to update docker daemon to use proxies
|
## Set these proxy values in order to update docker daemon to use proxies
|
||||||
# http_proxy: ""
|
# http_proxy: ""
|
||||||
# https_proxy: ""
|
# https_proxy: ""
|
||||||
# no_proxy: ""
|
# no_proxy: ""
|
||||||
|
|
||||||
|
# Path used to store Docker data
|
||||||
|
docker_daemon_graph: "/var/lib/docker"
|
||||||
|
|
||||||
## A string of extra options to pass to the docker daemon.
|
## A string of extra options to pass to the docker daemon.
|
||||||
## This string should be exactly as you wish it to appear.
|
## This string should be exactly as you wish it to appear.
|
||||||
## An obvious use case is allowing insecure-registry access
|
## An obvious use case is allowing insecure-registry access
|
||||||
## to self hosted registries like so:
|
## to self hosted registries like so:
|
||||||
docker_options: "--insecure-registry={{ kube_service_addresses }}"
|
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
|
||||||
|
|
||||||
|
# K8s image pull policy (imagePullPolicy)
|
||||||
|
k8s_image_pull_policy: IfNotPresent
|
||||||
|
|
||||||
|
# default packages to install within the cluster
|
||||||
|
kpm_packages: []
|
||||||
|
# - name: kube-system/grafana
|
||||||
|
|||||||
@@ -70,6 +70,28 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
|||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||||
|
}
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||||
@@ -89,6 +111,57 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-node,k8s-cluster"
|
||||||
|
}
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||||
|
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
description = "Non-ephemeral volume for GlusterFS"
|
||||||
|
size = "${var.gfs_volume_size_in_gb}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
image_name = "${var.image_gfs}"
|
||||||
|
flavor_id = "${var.flavor_gfs_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user_gfs}"
|
||||||
|
kubespray_groups = "gfs-cluster,network-storage"
|
||||||
|
}
|
||||||
|
volume {
|
||||||
|
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||||
|
}
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#output "msg" {
|
#output "msg" {
|
||||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||||
#}
|
#}
|
||||||
|
|||||||
@@ -1,238 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"serial": 17,
|
|
||||||
"modules": [
|
|
||||||
{
|
|
||||||
"path": [
|
|
||||||
"root"
|
|
||||||
],
|
|
||||||
"outputs": {},
|
|
||||||
"resources": {
|
|
||||||
"openstack_compute_instance_v2.k8s_master.0": {
|
|
||||||
"type": "openstack_compute_instance_v2",
|
|
||||||
"depends_on": [
|
|
||||||
"openstack_compute_keypair_v2.k8s",
|
|
||||||
"openstack_compute_secgroup_v2.k8s",
|
|
||||||
"openstack_compute_secgroup_v2.k8s_master",
|
|
||||||
"openstack_networking_floatingip_v2.k8s_master"
|
|
||||||
],
|
|
||||||
"primary": {
|
|
||||||
"id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
|
|
||||||
"attributes": {
|
|
||||||
"access_ip_v4": "173.247.105.12",
|
|
||||||
"access_ip_v6": "",
|
|
||||||
"flavor_id": "3",
|
|
||||||
"flavor_name": "m1.medium",
|
|
||||||
"floating_ip": "173.247.105.12",
|
|
||||||
"id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
|
|
||||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
|
||||||
"image_name": "ubuntu-14.04",
|
|
||||||
"key_pair": "kubernetes-example",
|
|
||||||
"metadata.#": "2",
|
|
||||||
"metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
|
|
||||||
"metadata.ssh_user": "ubuntu",
|
|
||||||
"name": "example-k8s-master-1",
|
|
||||||
"network.#": "1",
|
|
||||||
"network.0.access_network": "false",
|
|
||||||
"network.0.fixed_ip_v4": "10.230.7.86",
|
|
||||||
"network.0.fixed_ip_v6": "",
|
|
||||||
"network.0.floating_ip": "173.247.105.12",
|
|
||||||
"network.0.mac": "fa:16:3e:fb:82:1d",
|
|
||||||
"network.0.name": "internal",
|
|
||||||
"network.0.port": "",
|
|
||||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
|
||||||
"security_groups.#": "2",
|
|
||||||
"security_groups.2779334175": "example-k8s",
|
|
||||||
"security_groups.3772290257": "example-k8s-master",
|
|
||||||
"volume.#": "0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_compute_instance_v2.k8s_master.1": {
|
|
||||||
"type": "openstack_compute_instance_v2",
|
|
||||||
"depends_on": [
|
|
||||||
"openstack_compute_keypair_v2.k8s",
|
|
||||||
"openstack_compute_secgroup_v2.k8s",
|
|
||||||
"openstack_compute_secgroup_v2.k8s_master",
|
|
||||||
"openstack_networking_floatingip_v2.k8s_master"
|
|
||||||
],
|
|
||||||
"primary": {
|
|
||||||
"id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
|
|
||||||
"attributes": {
|
|
||||||
"access_ip_v4": "173.247.105.70",
|
|
||||||
"access_ip_v6": "",
|
|
||||||
"flavor_id": "3",
|
|
||||||
"flavor_name": "m1.medium",
|
|
||||||
"floating_ip": "173.247.105.70",
|
|
||||||
"id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
|
|
||||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
|
||||||
"image_name": "ubuntu-14.04",
|
|
||||||
"key_pair": "kubernetes-example",
|
|
||||||
"metadata.#": "2",
|
|
||||||
"metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
|
|
||||||
"metadata.ssh_user": "ubuntu",
|
|
||||||
"name": "example-k8s-master-2",
|
|
||||||
"network.#": "1",
|
|
||||||
"network.0.access_network": "false",
|
|
||||||
"network.0.fixed_ip_v4": "10.230.7.85",
|
|
||||||
"network.0.fixed_ip_v6": "",
|
|
||||||
"network.0.floating_ip": "173.247.105.70",
|
|
||||||
"network.0.mac": "fa:16:3e:33:98:e6",
|
|
||||||
"network.0.name": "internal",
|
|
||||||
"network.0.port": "",
|
|
||||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
|
||||||
"security_groups.#": "2",
|
|
||||||
"security_groups.2779334175": "example-k8s",
|
|
||||||
"security_groups.3772290257": "example-k8s-master",
|
|
||||||
"volume.#": "0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_compute_instance_v2.k8s_node": {
|
|
||||||
"type": "openstack_compute_instance_v2",
|
|
||||||
"depends_on": [
|
|
||||||
"openstack_compute_keypair_v2.k8s",
|
|
||||||
"openstack_compute_secgroup_v2.k8s",
|
|
||||||
"openstack_networking_floatingip_v2.k8s_node"
|
|
||||||
],
|
|
||||||
"primary": {
|
|
||||||
"id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
|
|
||||||
"attributes": {
|
|
||||||
"access_ip_v4": "173.247.105.76",
|
|
||||||
"access_ip_v6": "",
|
|
||||||
"flavor_id": "3",
|
|
||||||
"flavor_name": "m1.medium",
|
|
||||||
"floating_ip": "173.247.105.76",
|
|
||||||
"id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
|
|
||||||
"image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
|
|
||||||
"image_name": "ubuntu-14.04",
|
|
||||||
"key_pair": "kubernetes-example",
|
|
||||||
"metadata.#": "2",
|
|
||||||
"metadata.kubespray_groups": "kube-node,k8s-cluster",
|
|
||||||
"metadata.ssh_user": "ubuntu",
|
|
||||||
"name": "example-k8s-node-1",
|
|
||||||
"network.#": "1",
|
|
||||||
"network.0.access_network": "false",
|
|
||||||
"network.0.fixed_ip_v4": "10.230.7.84",
|
|
||||||
"network.0.fixed_ip_v6": "",
|
|
||||||
"network.0.floating_ip": "173.247.105.76",
|
|
||||||
"network.0.mac": "fa:16:3e:53:57:bc",
|
|
||||||
"network.0.name": "internal",
|
|
||||||
"network.0.port": "",
|
|
||||||
"network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
|
|
||||||
"security_groups.#": "1",
|
|
||||||
"security_groups.2779334175": "example-k8s",
|
|
||||||
"volume.#": "0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_compute_keypair_v2.k8s": {
|
|
||||||
"type": "openstack_compute_keypair_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "kubernetes-example",
|
|
||||||
"attributes": {
|
|
||||||
"id": "kubernetes-example",
|
|
||||||
"name": "kubernetes-example",
|
|
||||||
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9nU6RPYCabjLH1LvJfpp9L8r8q5RZ6niS92zD95xpm2b2obVydWe0tCSFdmULBuvT8Q8YQ4qOG2g/oJlsGOsia+4CQjYEUV9CgTH9H5HK3vUOwtO5g2eFnYKSmI/4znHa0WYpQFnQK2kSSeCs2beTlJhc8vjfN/2HHmuny6SxNSbnCk/nZdwamxEONIVdjlm3CSBlq4PChT/D/uUqm/nOm0Zqdk9ZlTBkucsjiOCJeEzg4HioKmIH8ewqsKuS7kMADHPH98JMdBhTKbYbLrxTC/RfiaON58WJpmdOA935TT5Td5aVQZoqe/i/5yFRp5fMG239jtfbM0Igu44TEIib pczarkowski@Pauls-MacBook-Pro.local\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_compute_secgroup_v2.k8s": {
|
|
||||||
"type": "openstack_compute_secgroup_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
|
|
||||||
"attributes": {
|
|
||||||
"description": "example - Kubernetes",
|
|
||||||
"id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
|
|
||||||
"name": "example-k8s",
|
|
||||||
"rule.#": "5",
|
|
||||||
"rule.112275015.cidr": "",
|
|
||||||
"rule.112275015.from_group_id": "",
|
|
||||||
"rule.112275015.from_port": "1",
|
|
||||||
"rule.112275015.id": "597170c9-b35a-45c0-8717-652a342f3fd6",
|
|
||||||
"rule.112275015.ip_protocol": "tcp",
|
|
||||||
"rule.112275015.self": "true",
|
|
||||||
"rule.112275015.to_port": "65535",
|
|
||||||
"rule.2180185248.cidr": "0.0.0.0/0",
|
|
||||||
"rule.2180185248.from_group_id": "",
|
|
||||||
"rule.2180185248.from_port": "-1",
|
|
||||||
"rule.2180185248.id": "ffdcdd5e-f18b-4537-b502-8849affdfed9",
|
|
||||||
"rule.2180185248.ip_protocol": "icmp",
|
|
||||||
"rule.2180185248.self": "false",
|
|
||||||
"rule.2180185248.to_port": "-1",
|
|
||||||
"rule.3267409695.cidr": "",
|
|
||||||
"rule.3267409695.from_group_id": "",
|
|
||||||
"rule.3267409695.from_port": "-1",
|
|
||||||
"rule.3267409695.id": "4f91d9ca-940c-4f4d-9ce1-024cbd7d9c54",
|
|
||||||
"rule.3267409695.ip_protocol": "icmp",
|
|
||||||
"rule.3267409695.self": "true",
|
|
||||||
"rule.3267409695.to_port": "-1",
|
|
||||||
"rule.635693822.cidr": "",
|
|
||||||
"rule.635693822.from_group_id": "",
|
|
||||||
"rule.635693822.from_port": "1",
|
|
||||||
"rule.635693822.id": "c6816e5b-a1a4-4071-acce-d09b92d14d49",
|
|
||||||
"rule.635693822.ip_protocol": "udp",
|
|
||||||
"rule.635693822.self": "true",
|
|
||||||
"rule.635693822.to_port": "65535",
|
|
||||||
"rule.836640770.cidr": "0.0.0.0/0",
|
|
||||||
"rule.836640770.from_group_id": "",
|
|
||||||
"rule.836640770.from_port": "22",
|
|
||||||
"rule.836640770.id": "8845acba-636b-4c23-b9e2-5bff76d9008d",
|
|
||||||
"rule.836640770.ip_protocol": "tcp",
|
|
||||||
"rule.836640770.self": "false",
|
|
||||||
"rule.836640770.to_port": "22"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_compute_secgroup_v2.k8s_master": {
|
|
||||||
"type": "openstack_compute_secgroup_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
|
|
||||||
"attributes": {
|
|
||||||
"description": "example - Kubernetes Master",
|
|
||||||
"id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
|
|
||||||
"name": "example-k8s-master",
|
|
||||||
"rule.#": "0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_networking_floatingip_v2.k8s_master.0": {
|
|
||||||
"type": "openstack_networking_floatingip_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "2a320c67-214d-4631-a840-2de82505ed3f",
|
|
||||||
"attributes": {
|
|
||||||
"address": "173.247.105.12",
|
|
||||||
"id": "2a320c67-214d-4631-a840-2de82505ed3f",
|
|
||||||
"pool": "external",
|
|
||||||
"port_id": ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_networking_floatingip_v2.k8s_master.1": {
|
|
||||||
"type": "openstack_networking_floatingip_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
|
|
||||||
"attributes": {
|
|
||||||
"address": "173.247.105.70",
|
|
||||||
"id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
|
|
||||||
"pool": "external",
|
|
||||||
"port_id": ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"openstack_networking_floatingip_v2.k8s_node": {
|
|
||||||
"type": "openstack_networking_floatingip_v2",
|
|
||||||
"primary": {
|
|
||||||
"id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
|
|
||||||
"attributes": {
|
|
||||||
"address": "173.247.105.76",
|
|
||||||
"id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
|
|
||||||
"pool": "external",
|
|
||||||
"port_id": ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"serial": 16,
|
|
||||||
"modules": [
|
|
||||||
{
|
|
||||||
"path": [
|
|
||||||
"root"
|
|
||||||
],
|
|
||||||
"outputs": {},
|
|
||||||
"resources": {}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -6,10 +6,26 @@ variable "number_of_k8s_masters" {
|
|||||||
default = 2
|
default = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_floating_ip" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_nodes" {
|
variable "number_of_k8s_nodes" {
|
||||||
default = 1
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_nodes_no_floating_ip" {
|
||||||
|
default = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "number_of_gfs_nodes_no_floating_ip" {
|
||||||
|
default = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "gfs_volume_size_in_gb" {
|
||||||
|
default = 75
|
||||||
|
}
|
||||||
|
|
||||||
variable "public_key_path" {
|
variable "public_key_path" {
|
||||||
description = "The path of the ssh pub key"
|
description = "The path of the ssh pub key"
|
||||||
default = "~/.ssh/id_rsa.pub"
|
default = "~/.ssh/id_rsa.pub"
|
||||||
@@ -20,11 +36,21 @@ variable "image" {
|
|||||||
default = "ubuntu-14.04"
|
default = "ubuntu-14.04"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "image_gfs" {
|
||||||
|
description = "Glance image to use for GlusterFS"
|
||||||
|
default = "ubuntu-16.04"
|
||||||
|
}
|
||||||
|
|
||||||
variable "ssh_user" {
|
variable "ssh_user" {
|
||||||
description = "used to fill out tags for ansible inventory"
|
description = "used to fill out tags for ansible inventory"
|
||||||
default = "ubuntu"
|
default = "ubuntu"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "ssh_user_gfs" {
|
||||||
|
description = "used to fill out tags for ansible inventory"
|
||||||
|
default = "ubuntu"
|
||||||
|
}
|
||||||
|
|
||||||
variable "flavor_k8s_master" {
|
variable "flavor_k8s_master" {
|
||||||
default = 3
|
default = 3
|
||||||
}
|
}
|
||||||
@@ -33,6 +59,9 @@ variable "flavor_k8s_node" {
|
|||||||
default = 3
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "flavor_gfs_node" {
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
|
||||||
variable "network_name" {
|
variable "network_name" {
|
||||||
description = "name of the internal network to use"
|
description = "name of the internal network to use"
|
||||||
|
|||||||
@@ -309,6 +309,7 @@ def openstack_host(resource, module_name):
|
|||||||
attrs = {
|
attrs = {
|
||||||
'access_ip_v4': raw_attrs['access_ip_v4'],
|
'access_ip_v4': raw_attrs['access_ip_v4'],
|
||||||
'access_ip_v6': raw_attrs['access_ip_v6'],
|
'access_ip_v6': raw_attrs['access_ip_v6'],
|
||||||
|
'ip': raw_attrs['network.0.fixed_ip_v4'],
|
||||||
'flavor': parse_dict(raw_attrs, 'flavor',
|
'flavor': parse_dict(raw_attrs, 'flavor',
|
||||||
sep='_'),
|
sep='_'),
|
||||||
'id': raw_attrs['id'],
|
'id': raw_attrs['id'],
|
||||||
@@ -346,6 +347,15 @@ def openstack_host(resource, module_name):
|
|||||||
if 'metadata.ssh_user' in raw_attrs:
|
if 'metadata.ssh_user' in raw_attrs:
|
||||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||||
|
|
||||||
|
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
|
||||||
|
device_index = 1
|
||||||
|
for key, value in raw_attrs.items():
|
||||||
|
match = re.search("^volume.*.device$", key)
|
||||||
|
if match:
|
||||||
|
attrs['disk_volume_device_'+str(device_index)] = value
|
||||||
|
device_index += 1
|
||||||
|
|
||||||
|
|
||||||
# attrs specific to Mantl
|
# attrs specific to Mantl
|
||||||
attrs.update({
|
attrs.update({
|
||||||
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
||||||
|
|||||||
118  docs/ansible.md
@@ -45,6 +45,120 @@ kube-master
etcd
```

Group vars and overriding variables precedence
----------------------------------------------

The group variables to control main deployment options are located in the directory ``inventory/group_vars``.

There are also role vars for docker, rkt, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use
the `-e` runtime flags (the simplest way) or the other layers described in the docs.

Kargo uses only a few layers to override things (or expects them to
be overridden for roles):

Layer | Comment
------|--------
**role defaults** | provides best UX to override things for Kargo deployments
inventory vars | Unused
**inventory group_vars** | Expects users to use ``all.yml``, ``k8s-cluster.yml`` etc. to override things
inventory host_vars | Unused
playbook group_vars | Unused
playbook host_vars | Unused
**host facts** | Kargo overrides for internal roles' logic, like state flags
play vars | Unused
play vars_prompt | Unused
play vars_files | Unused
registered vars | Unused
set_facts | Kargo overrides those, for some places
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
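For instance, a minimal extra-vars file passed with ``ansible-playbook -e @foo.yml`` could look like the sketch below. The variables are the standard Kargo group vars mentioned elsewhere in these docs; the values are only illustrative:

```
# foo.yml -- extra vars always win precedence, so these values override
# role defaults and inventory group_vars alike (illustrative values only)
kube_network_plugin: calico
kube_version: v1.5.1
upstream_dns_servers: [172.18.32.6, 8.8.8.8]
```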
Ansible tags
------------
The following tags are defined in playbooks:

| Tag name | Used for
|--------------------------|---------
| apps | K8s apps definitions
| azure | Cloud-provider Azure
| bastion | Setup ssh config for bastion
| bootstrap-os | Anything related to host OS configuration
| calico | Network plugin Calico
| canal | Network plugin Canal
| cloud-provider | Cloud-provider related tasks
| dnsmasq | Configuring DNS stack for hosts and K8s apps
| docker | Configuring docker for hosts
| download | Fetching container images to a delegate host
| etcd | Configuring etcd cluster
| etcd-pre-upgrade | Upgrading etcd cluster
| etcd-secrets | Configuring etcd certs/keys
| etchosts | Configuring /etc/hosts entries for hosts
| facts | Gathering facts and misc check results
| flannel | Network plugin flannel
| gce | Cloud-provider GCP
| hyperkube | Manipulations with K8s hyperkube image
| k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys
| kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring self-hosted kube-apiserver
| kube-controller-manager | Configuring self-hosted kube-controller-manager
| kubectl | Installing kubectl and bash completion
| kubelet | Configuring kubelet service
| kube-proxy | Configuring self-hosted kube-proxy
| kube-scheduler | Configuring self-hosted kube-scheduler
| localhost | Special steps for the localhost (ansible runner)
| master | Configuring K8s master node role
| netchecker | Installing netchecker K8s app
| network | Configuring networking plugins for K8s
| nginx | Configuring LB for kube-apiserver instances
| node | Configuring K8s minion (compute) node role
| openstack | Cloud-provider OpenStack
| preinstall | Preliminary configuration steps
| resolvconf | Configuring /etc/resolv.conf for hosts/apps
| upgrade | Upgrading, e.g. container images/binaries
| upload | Distributing images/binaries across hosts
| weave | Network plugin Weave

Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
tags found in the codebase. New tags will be listed with an empty "Used for"
field.

Example commands
----------------
Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers:

```
ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
```
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
```
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
```
And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
```
ansible-playbook -i inventory/inventory.ini cluster.yaml \
    -e download_run_once=true -e download_localhost=true \
    --tags download --skip-tags upload,upgrade
```

Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.

Bastion host
--------------
If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
you can use a so-called *bastion* host to connect to your nodes. To specify and use a bastion,
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
bastion host.

```
bastion ansible_ssh_host=x.x.x.x
```

For more information about Ansible and bastion hosts, read
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
56  docs/azure.md (new file)
@@ -0,0 +1,56 @@
Azure
===============

To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.

All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.

Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/colemickens/azure-kubernetes-status)

### Parameters

Before creating the instances you must first set the `azure_` variables in the `group_vars/all.yml` file.

All of the values can be retrieved using the azure cli tool which can be downloaded here: https://docs.microsoft.com/en-gb/azure/xplat-cli-install
After installation you have to run `azure login` to get access to your account.


#### azure\_tenant\_id + azure\_subscription\_id
Run `azure account show` to retrieve your subscription id and tenant id:
`azure_tenant_id` -> Tenant ID field
`azure_subscription_id` -> ID field


#### azure\_location
The region your instances are located in, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `azure location list`.


#### azure\_resource\_group
The name of the resource group your instances are in, can be retrieved via `azure group list`.

#### azure\_vnet\_name
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`.

#### azure\_subnet\_name
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`.

#### azure\_security\_group\_name
The name of the network security group your instances are in, can be retrieved via `azure network nsg list`.

#### azure\_aad\_client\_id + azure\_aad\_client\_secret
These will have to be generated first:
- Create an Azure AD Application with:
  `azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
  The name, identifier-uri, home-page and the password can be chosen freely.
  Note the AppId in the output.
- Create a Service principal for the application with:
  `azure ad sp create --applicationId AppId`
  where AppId is the id from the previous command.
- Create the role assignment with:
  `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`

`azure_aad_client_id` must be set to the AppId, `azure_aad_client_secret` is your chosen secret.
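Putting the parameters together, a `group_vars/all.yml` sketch could look like the following. Every value below is a placeholder, not a real ID or secret:

```
# group_vars/all.yml -- placeholder values only, substitute your own
cloud_provider: azure
azure_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_location: westeurope
azure_resource_group: my-kube-rg
azure_vnet_name: my-kube-vnet
azure_subnet_name: my-kube-subnet
azure_security_group_name: my-kube-nsg
azure_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_secret: "CLIENT_SECRET"
```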
## Provisioning Azure with Resource Group Templates

You'll find Resource Group Templates and scripts to provision the required infrastructure on Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
116  docs/calico.md
@@ -10,23 +10,57 @@ docker ps | grep calico
The **calicoctl** command allows checking the status of the network workloads.
* Check the status of Calico nodes

```
calicoctl node status
```

or for versions prior to *v1.0.0*:

```
calicoctl status
```

* Show the configured network subnet for containers

```
calicoctl get ippool -o wide
```

or for versions prior to *v1.0.0*:

```
calicoctl pool show
```

* Show the workloads (IP addresses of containers and where they are located)

```
calicoctl get workloadEndpoint -o wide
```

and

```
calicoctl get hostEndpoint -o wide
```

or for versions prior to *v1.0.0*:

```
calicoctl endpoint show --detail
```

##### Optional : Define network backend

In some cases you may want to define the Calico network backend. Allowed values are 'bird', 'gobgp' or 'none'. Bird is the default value.

To re-define it you need to edit the inventory and add a group variable `calico_network_backend`:

```
calico_network_backend: none
```

##### Optional : BGP Peering with border routers

In some cases you may want to route the pods subnet so that NAT is not needed on the nodes.
For instance if you have a cluster spread over different locations and you want your pods to talk to each other no matter where they are located.
@@ -37,3 +71,83 @@ you'll need to edit the inventory and add a hostvar `local_as` per node.
```
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
```

##### Optional : Define global AS number

The optional parameter `global_as_num` defines the Calico global AS number (the `/calico/bgp/v1/global/as_num` etcd key).
It defaults to "64512".

##### Optional : BGP Peering with route reflectors

At large scale you may want to disable full node-to-node mesh in order to
optimize your BGP topology and improve `calico-node` containers' start times.

To do so you can deploy BGP route reflectors and peer `calico-node` with them as
recommended here:

* https://hub.docker.com/r/calico/routereflector/
* http://docs.projectcalico.org/v2.0/reference/private-cloud/l3-interconnect-fabric

You need to edit your inventory and add:

* a `calico-rr` group with nodes in it. At the moment it's incompatible with
  `kube-node` due to a BGP port conflict with the `calico-node` container. So you
  should not have nodes in both the `calico-rr` and `kube-node` groups.
* a `cluster_id` per route reflector node/group (see details
  [here](https://hub.docker.com/r/calico/routereflector/))

Here's an example of a Kargo inventory with route reflectors:

```
[all]
rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10
rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11
node2 ansible_ssh_host=10.210.1.12 ip=10.210.1.12
node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13
node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14
node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15

[kube-master]
node2
node3

[etcd]
node2
node3
node4

[kube-node]
node2
node3
node4
node5

[k8s-cluster:children]
kube-node
kube-master

[calico-rr]
rr0
rr1

[rack0]
rr0
rr1
node2
node3
node4
node5

[rack0:vars]
cluster_id="1.0.0.1"
```

The inventory above will deploy the following topology assuming that calico's
`global_as_num` is set to `65400`:



Cloud providers configuration
=============================

Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
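For instance, when running Calico on a cloud provider the relevant group vars might look like this sketch. The variables are the ones described in these docs; `gce` is only an example value:

```
# Sketch: Calico on a cloud provider, where ipip encapsulation is always enabled
cloud_provider: gce
kube_network_plugin: calico
ipip: true
```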
25  docs/comparisons.md (new file)
@@ -0,0 +1,25 @@
Kargo vs [Kops](https://github.com/kubernetes/kops)
---------------

Kargo runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration
itself, and as such is less flexible in deployment platforms. For people with
familiarity with Ansible, existing Ansible deployments, or the desire to run a
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
however, is more tightly integrated with the unique features of the clouds it
supports, so it could be a better choice if you know that you will only be using
one platform for the foreseeable future.

Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------

Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it would've likely been named a "Kubernetes cluster operator". Kargo, however,
does generic configuration management tasks from the "OS operators" Ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides.
@@ -1,70 +1,121 @@
K8s DNS stack by Kargo
======================

For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).

Other nodes in the inventory, like external storage nodes or a separate etcd cluster
node group, are considered non-cluster and DNS resolution for them is left up to the user to configure.

DNS variables
=============

There are several global variables which can be used to modify DNS settings:

#### ndots
ndots value to be used in ``/etc/resolv.conf``

It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of the DNS stack, so please choose it wisely.
The dnsmasq DaemonSet can accept lower ``ndots`` values and return NXDOMAIN
replies for [bogus internal FQDNs](https://github.com/kubernetes/kubernetes/issues/19634#issuecomment-253948954)
before they even hit the kubedns app. This enables dnsmasq to serve as a
protective, but still recursive, resolver in front of kubedns.

#### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).

Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.

Please note that ``resolvconf_mode: docker_dns`` will automatically add your system's search domains as
additional search domains. Please take this into account for the limits.

#### nameservers
This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts'
``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable
is not set, a default resolver is chosen (depending on the cloud provider, or 8.8.8.8 when no cloud provider is specified).

#### upstream_dns_servers
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
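As a quick illustration, these variables could be combined in group vars roughly like the sketch below. The search domain and resolver addresses are placeholders:

```
# Illustrative values only -- tune ndots and searchdomains for your own domains
ndots: 2
searchdomains: [corp.example.com]
upstream_dns_servers: [172.18.32.6, 8.8.8.8]
nameservers: [8.8.4.4]   # only consumed by resolvconf_mode: host_resolvconf
```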
DNS modes supported by kargo
============================

You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.

## dns_mode
``dns_mode`` configures how kargo will set up cluster DNS. There are three modes available:

#### dnsmasq_kubedns (default)
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.

#### kubedns
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.

#### none
This installs neither dnsmasq nor kubedns/skydns. This basically disables cluster DNS completely and
leaves you with a non-functional cluster.

## resolvconf_mode
``resolvconf_mode`` configures how kargo will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:

#### docker_dns (default)
This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags.

The following nameservers are added to the docker daemon (in the same order as listed here):
* cluster nameserver (depends on dns_mode)
* content of the optional upstream_dns_servers variable
* host system nameservers (read from the hosts' /etc/resolv.conf)

The following search domains are added to the docker daemon (in the same order as listed here):
* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``)
* content of the optional searchdomains variable
* host system search domains (read from the hosts' /etc/resolv.conf)

The following dns options are added to the docker daemon:
* ndots:{{ ndots }}
* timeout:2
* attempts:2

For normal PODs, k8s will ignore these options and set up its own DNS settings for the PODs, taking
the --cluster_dns (either dnsmasq or kubedns, depending on dns_mode) kubelet option into account.
For ``hostNetwork: true`` PODs however, k8s will let docker set up DNS settings. Docker containers which
are not started/managed by k8s will also use these docker options.

The host system nameservers are added to ensure name resolution is also working while cluster DNS is not
running yet. This is especially important in early stages of cluster deployment. In this early stage,
DNS queries to the cluster DNS will time out after a few seconds, resulting in the system nameserver being
used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
servers, which in turn will forward queries to the system nameserver if required.

#### host_resolvconf
This activates the classic kargo behaviour that modifies the hosts' ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).

As cluster DNS is not available at the early deployment stage, this mode is split into 2 stages. In the first
stage (``dns_early: true``), ``/etc/resolv.conf`` is configured to use the DNS servers found in ``upstream_dns_servers``
and ``nameservers``. Later, ``/etc/resolv.conf`` is reconfigured to use the cluster DNS server first, leaving
the other nameservers as backups.

Also note, existing records will be purged from the `/etc/resolv.conf`,
including resolvconf's base/head/cloud-init config files and those that come from dhclient.

#### none
Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases.
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
cluster service names.

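For example, to use plain kubedns while keeping the classic resolv.conf handling, the corresponding group vars would be (a sketch of the two mode variables described above):

```
dns_mode: kubedns                 # no dnsmasq DaemonSet; kubelet talks to kubedns/skydns directly
resolvconf_mode: host_resolvconf  # classic behaviour: rewrite /etc/resolv.conf on the hosts
```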
Limitations
-----------
@@ -78,8 +129,7 @@ Limitations
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
for the SkyDNS ``ndots`` param via an
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
add-on, while SkyDNS supports it though.

* the ``searchdomains`` have a limitation of 6 names and 256 chars
  length. Due to the default ``svc, default.svc`` subdomains, the actual
42  docs/downloads.md (new file)
@@ -0,0 +1,42 @@
Downloading binaries and containers
===================================

Kargo supports several download/upload modes. The default is:

* Each node downloads binaries and container images on its own, which is
  ``download_run_once: False``.
* For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
* For system managed containers, like kubelet or etcd, pull policy is
  ``download_always_pull: False``, which means pull only if the wanted repo and
  tag/sha256 digest differ from what the host has.

There is also a "pull once, push many" mode as well:

* Override ``download_run_once: True`` to download container images only once,
  then push them to cluster nodes in batches. The default delegate node
  for pushing images is the first `kube-master`.
* If your ansible runner node (aka the admin node) has password-less sudo and
  docker enabled, you may want to define ``download_localhost: True``, which
  makes that node a delegate for pushing images while running the deployment with
  ansible. This may be the case if cluster nodes cannot access each other via ssh
  or you want to use local docker images as a cache for multiple clusters.

Container images and binary files are described by vars like ``foo_version``,
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
``foo_image_tag`` or the optional ``foo_digest_checksum`` for containers.

Container images may be defined by their repo and tag, for example:
`andyshinn/dnsmasq:2.72`. Or by repo, tag and sha256 digest:
`andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.

Note, the sha256 digest and the image tag must be both specified and correspond
to each other. The example above is represented by the following vars:
```
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
dnsmasq_image_repo: andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
```
The full list of available vars may be found in the download role's defaults.
Those also allow specifying custom urls and local repositories for binaries and container
images as well. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those urls and repos.
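A hedged group_vars sketch for the "pull once, push many" mode, pointing one of the images described above at an intranet mirror (the registry host below is hypothetical):

```
download_run_once: true
download_localhost: true
# hypothetical intranet mirror of the dnsmasq image described above
dnsmasq_image_repo: registry.example.local:5000/andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
```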
Binary figure changes (contents not shown): an existing figure (previously 654 KiB) is no longer shown, and docs/figures/kargo-calico-rr.png is added as a new 40 KiB file.
@@ -17,3 +17,16 @@ kargo aws --instances 3
```
kargo deploy --aws -u centos -n calico
```

Building your own inventory
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Ansible inventory can be stored in 3 formats: YAML, JSON, or ini file. There is
an example inventory located
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).

You can use an
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_generator/inventory_generator.py)
to create or modify an Ansible inventory. Currently, it is limited in
functionality and is only used for making a basic Kargo cluster, but it does
support creating large clusters.
@@ -5,10 +5,6 @@ The following components require highly available endpoints:
* etcd cluster,
* kube-apiserver service instances.

The latter relies on 3rd-party reverse proxies, like Nginx or HAProxy, to
achieve the same goal.

@@ -49,13 +45,15 @@ type. The following diagram shows how traffic to the apiserver is directed.



Note: Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
services. This makes backends receive unencrypted traffic, which may be a
security issue when interconnecting different nodes, or maybe not, if those
belong to an isolated management network without external access.

A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only to the localhost.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
an example configuration for a HAProxy service acting as an external LB:
```
@@ -81,24 +79,19 @@ loadbalancer_apiserver:
This domain name, or the default "lb-apiserver.kubernetes.local", will be inserted
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
the HAProxy service should as well be HA and requires VIP management, which
is out of scope of this doc. Specifying an external LB overrides any internal
localhost LB configuration.

Note: In order to achieve HA for HAProxy instances, those must be running on
each node in the `k8s-cluster` group as well, but require no VIP, thus
no VIP management.

Access endpoints are evaluated automagically, as the following:

| Endpoint type                | kube-master   | non-master          |
|------------------------------|---------------|---------------------|
| Local LB                     | http://lc:p   | https://lc:sp       |
| External LB, no internal     | https://lb:lp | https://lb:lp       |
| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |

Where:
@@ -8,12 +8,24 @@ For large scaled deployments, consider the following configuration changes:

* Override containers' `foo_image_repo` vars to point to an intranet registry.

* Override ``download_run_once: true`` and/or ``download_localhost: true``.
  See download modes for details.

* Adjust the `retry_stagger` global var as appropriate. It should provide sane
  load on a delegate (the first K8s master node) when retrying failed
  push or download operations.

* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
  replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
  ``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
  Please note that limits must always be greater than or equal to requests.
  A sizing sketch follows this list.

* Tune CPU/memory limits and requests. Those are located in roles' defaults
  and named like ``foo_memory_limit``, ``foo_memory_requests`` and
  ``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s
  will be submitted as 'M' if applied for ``docker run``, and cpu K8s units will
  end up with the 'm' skipped for docker as well. This is required as docker does not
  understand k8s units well.

For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
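For instance, a large deployment might override the DNS app sizing and retry stagger roughly as follows. The numbers are illustrative, not recommendations:

```
# Illustrative sizing for a large cluster
retry_stagger: 60
dns_replicas: 3
dns_cpu_limit: 300m
dns_memory_limit: 512Mi
dns_cpu_requests: 100m
dns_memory_requests: 256Mi
```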
41  docs/netcheck.md (new file)
@@ -0,0 +1,41 @@
Network Checker Application
===========================

With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
Network Checker Application built from the 3rd-party `l23network/mcp-netchecker` docker
images. It consists of a server and agents trying to reach the server by the usual
means of network connectivity for Kubernetes applications. Therefore, this
automagically verifies pod to pod connectivity via the cluster IP and checks
that DNS resolution is functioning as well.

The checks are run by agents on a periodic basis and cover standard and host network
pods as well. The history of performed checks may be found in the agents' application
logs.

To get the most recent and cluster-wide network connectivity report, run from
any of the cluster nodes:
```
curl http://localhost:31081/api/v1/connectivity_check
```
Note that Kargo does not invoke the check but only deploys the application, if
requested.

There are related application specific variables:
```
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
agent_img: "quay.io/l23network/mcp-netchecker-agent:v0.1"
server_img: "quay.io/l23network/mcp-netchecker-server:v0.1"
```

Note that the application verifies DNS resolution only for FQDNs comprising the
combination of the ``netcheck_namespace.dns_domain`` vars, for example
``netchecker-service.default.cluster.local``. If you want to deploy the application
to a non-default namespace, make sure as well to adjust the ``searchdomains`` var
so the resulting search domain records contain that namespace, like:

```
search: foospace.cluster.local default.cluster.local ...
nameserver: ...
```
@@ -1,6 +1,10 @@
Kargo's roadmap
=================

### Kubeadm
- Propose kubeadm as an option in order to set up the kubernetes cluster.
  That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)

### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
@@ -26,13 +30,14 @@ Kargo's roadmap
- single test with the Ansible version n-1 per day
- Test idempotency on a single OS but for all network plugins/container engines
- single test on AWS per day
- test different architectures:
  - 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
  - 5 instances, 3 are etcd and nodes, 2 are masters only
  - 7 instances, 3 etcd only, 2 masters, 2 nodes
- test scale up cluster: +1 etcd, +1 master, +1 node

### Lifecycle
- Adopt the kubeadm tool by delegating CM tasks it is capable of accomplishing well [#553](https://github.com/kubespray/kargo/issues/553)
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- Drain worker node when shutting down/deleting an instance

@@ -41,6 +46,7 @@ Kargo's roadmap
- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
- Opencontrail
- Canal
- Cloud Provider native networking (instead of our network plugins)

### High availability
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
@@ -56,7 +62,7 @@ While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kub
### Kargo API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- make sure that the state of the cluster is completely saved in no more than one config file beyond the hosts inventory

### Addons (with kpm)
Include optional deployments to init the cluster:
@@ -65,7 +71,7 @@ Include optional deployments to init the cluster:
- **Prometheus**

##### Others

##### Dashboards:
- kubernetes-dashboard
- Fabric8
54  docs/test_cases.md (new file)
@@ -0,0 +1,54 @@
Travis CI test matrix
=====================

GCE instances
-------------

Here is the test matrix for the Travis CI gates:

| Network plugin| OS type| GCE region| Nodes layout|
|-------------------------|-------------------------|-------------------------|-------------------------|
| canal| debian-8-kubespray| asia-east1-a| ha|
| calico| debian-8-kubespray| europe-west1-c| default|
| flannel| centos-7| asia-northeast1-c| default|
| calico| centos-7| us-central1-b| ha|
| weave| rhel-7| us-east1-c| default|
| canal| coreos-stable| us-west1-b| default|
| canal| rhel-7| asia-northeast1-b| separate|
| weave| ubuntu-1604-xenial| europe-west1-d| separate|
| calico| coreos-stable| us-central1-f| separate|

Where the nodes layout `default` is a non-HA two node setup with a separate `kube-node`
and the `etcd` group merged with the `kube-master`. The `separate` layout is when
there is only one node of each type: a kube master, a compute node, and an etcd cluster member.
And the `ha` layout stands for two etcd nodes, two masters and a single worker node,
partially intersecting though.

Note, the canal network plugin deploys flannel as well, plus the calico policy controller.

Hint: the command
```
bash scripts/gen_matrix.sh
```
will (hopefully) generate the CI test cases from the current ``.travis.yml``.

Gitlab CI test matrix
=====================

GCE instances
-------------

| Stage| Network plugin| OS type| GCE region| Nodes layout
|--------------------|--------------------|--------------------|--------------------|--------------------|
| part1| calico| coreos-stable| us-west1-b| separated|
| part1| canal| debian-8-kubespray| us-east1-b| ha|
| part1| weave| rhel-7| europe-west1-b| default|
| part2| flannel| centos-7| us-west1-a| default|
| part2| calico| debian-8-kubespray| us-central1-b| default|
| part2| canal| coreos-stable| us-east1-b| default|
| special| canal| rhel-7| us-east1-b| separated|
| special| weave| ubuntu-1604-xenial| us-central1-b| separated|
| special| calico| centos-7| europe-west1-b| ha|
| special| weave| coreos-alpha| us-west1-a| ha|

The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
47  docs/upgrades.md (new file)
@@ -0,0 +1,47 @@
Upgrading Kubernetes in Kargo
=============================

#### Description

Kargo handles upgrades the same way it handles initial deployment. That is to
say that each component is laid down in a fixed order. You should be able to
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
also individually control versions of components by explicitly defining their
versions. Here are all version vars for each component:

* docker_version
* kube_version
* etcd_version
* calico_version
* calico_cni_version
* weave_version
* flannel_version
* kubedns_version

#### Example

If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
deploy the following way:

```
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
```

And then repeat with v1.4.6 as kube_version:

```
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
```

#### Upgrade order

As mentioned above, components are upgraded in the order in which they were
installed in the Ansible playbook. The order of component installation is as
follows:

* Docker
* etcd
* kubelet and kube-proxy
* network_plugin (such as Calico or Weave)
* kube-apiserver, kube-scheduler, and kube-controller-manager
* Add-ons (such as KubeDNS)
100  docs/vars.md (new file)
@@ -0,0 +1,100 @@
Configurable Parameters in Kargo
================================

#### Generic Ansible variables

You can view facts gathered by Ansible automatically
[here](http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts).

Some variables of note include:

* *ansible_user*: user to connect to via SSH
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
  Generated based on the output from the command ``ip -4 route get 8.8.8.8``

#### Common vars that are used in Kargo

* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to use (should be a quoted
  string)
* *etcd_version* - Specify version of ETCD to use
* *ipip* - Enables Calico ipip encapsulation by default
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
  resides
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
* *kube_network_plugin* - Changes k8s plugin to Calico
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes hyperkube version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *nameservers* - Array of nameservers to use for DNS lookup

#### Addressing variables

* *ip* - IP to use for binding services (host var)
* *access_ip* - IP for other hosts to use to connect to. Often required when
  deploying from a cloud, such as OpenStack or GCE, and you have separate
  public/floating and private IPs.
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
  and access_ip are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
  address instead of localhost for kube-masters and kube-master[0] for
  kube-nodes. See more details in the
  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - If enabled, all hosts will connect to
  the apiserver's internally load balanced endpoint. See more details in the
  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).

#### Cluster variables

Kubernetes needs some parameters in order to get deployed. These are the
following default cluster parameters:

* *cluster_name* - Name of cluster (default is cluster.local)
* *domain_name* - Name of cluster DNS domain (default is cluster.local)
* *kube_network_plugin* - Plugin to use for container networking
* *kube_service_addresses* - Subnet for cluster IPs (default is
  10.233.0.0/18). Must not overlap with kube_pods_subnet
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
  overlap with kube_service_addresses.
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. The remaining
  bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster.
* *dns_setup* - Enables dnsmasq
* *dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
  OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
  Kubernetes

Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick other values for ``kube_service_addresses``
and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.

#### DNS variables

By default, dnsmasq gets set up with 8.8.8.8 as an upstream DNS server and all
other settings from your existing /etc/resolv.conf are lost. Set the following
variables to match your requirements.

* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
  addition to Kargo deployed DNS
* *nameservers* - Array of DNS servers configured for use in dnsmasq
* *searchdomains* - Array of up to 4 search domains
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)

For more information, see [DNS
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).

#### Other service variables

* *docker_options* - Commonly used to set
  ``--insecure-registry=myregistry.mydomain:5000``
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
  proxy

#### User accounts

Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
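Tying the cluster and addressing variables together, an inventory group_vars sketch might look like the following. The values are illustrative; the subnets are just the defaults restated:

```
# Illustrative cluster settings using the variables above
cluster_name: cluster.local
kube_network_plugin: calico
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
kube_api_pwd: "changeme"
```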
@@ -1,9 +1,31 @@
-# Valid bootstrap options (required): xenial, coreos, none
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
 bootstrap_os: none

 # Directory where the binaries will be installed
 bin_dir: /usr/local/bin

+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save the basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.5.1
+
 # Where the binaries will be downloaded.
 # Note: ensure that you have enough disk space (about 1G)
 local_release_dir: "/tmp/releases"

@@ -21,6 +43,11 @@ kube_cert_group: kube-cert
 # Cluster Loglevel configuration
 kube_log_level: 2

+# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
+# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
+# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
+kube_api_anonymous_auth: false
+
 # Users to create for basic auth in Kubernetes API via HTTP
 kube_api_pwd: "changeme"
 kube_users:

@@ -33,8 +60,10 @@ kube_users:

 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf
-ndots: 5
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false

 # For some environments, each node has a publicly accessible
 # address and an address it should bind services to. These are

@@ -62,13 +91,14 @@ ndots: 5
 # Enable multiaccess to configure clients to access all of the etcd members directly
 # as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
 # This may be the case if clients support and loadbalance multiple etcd servers natively.
-etcd_multiaccess: false
+etcd_multiaccess: true

 # Assume there are no internal loadbalancers for apiservers and listen on
 # kube_apiserver_port (default 443)
 loadbalancer_apiserver_localhost: true

 # Choose network plugin (calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: flannel

 # Kubernetes internal network for services, unused block of space.

@@ -103,42 +133,72 @@ kube_apiserver_insecure_port: 8080 # (http)
 # into appropriate IP addresses. It's highly advisable to run such a DNS server,
 # as it greatly simplifies configuration of your applications - you can use
 # service names instead of magic environment variables.
-# You still must manually configure all your containers to use this DNS server,
-# Kubernetes won't do this for you (yet).

-# Do not install additional dnsmasq
-skip_dnsmasq: false
-# Upstream dns servers used by dnsmasq
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: dnsmasq_kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+
+## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 # - 8.8.8.8
 # - 8.8.4.4
-#
-# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
-dns_setup: true
 dns_domain: "{{ cluster_name }}"
-#
-# # Ip address of the kubernetes skydns service
+# Ip address of the kubernetes skydns service
 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
 dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

 # There are some changes specific to the cloud providers
 # for instance we need to encapsulate packets with some network plugins
-# If set the possible values are either 'gce', 'aws' or 'openstack'
+# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
 # When openstack is used make sure to source in the openstack credentials
 # like you would do when using nova-client before starting the playbook.
+# When azure is used, you need to also set the following variables.
 # cloud_provider:

+# see docs/azure.md for details on how to get these values
+#azure_tenant_id:
+#azure_subscription_id:
+#azure_aad_client_id:
+#azure_aad_client_secret:
+#azure_resource_group:
+#azure_location:
+#azure_subnet_name:
+#azure_security_group_name:
+#azure_vnet_name:
+#azure_route_table_name:
+
 ## Set these proxy values in order to update docker daemon to use proxies
 # http_proxy: ""
 # https_proxy: ""
 # no_proxy: ""

+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
 ## A string of extra options to pass to the docker daemon.
 ## This string should be exactly as you wish it to appear.
 ## An obvious use case is allowing insecure-registry access
 ## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }}"
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
+docker_bin_dir: "/usr/bin"
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
 # default packages to install within the cluster
 kpm_packages: []
 # - name: kube-system/grafana
+
+# Settings for containerized control plane (etcd/kubelet)
+rkt_version: 1.21.0
+etcd_deployment_type: docker
+kubelet_deployment_type: docker
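The ``skydns_server`` and ``dns_server`` defaults above derive fixed service IPs
from the service CIDR with Ansible's ``ipaddr`` filter (which requires the Python
``netaddr`` library). A minimal sketch of the arithmetic, assuming the service
range is ``10.233.0.0/18`` (consistent with the ``10.233.0.2``/``10.233.0.3``
defaults documented earlier); the playbook itself is a hypothetical helper:

```yaml
# show-dns-ips.yml - hypothetical helper, not part of the repository
- hosts: localhost
  gather_facts: false
  vars:
    kube_service_addresses: 10.233.0.0/18    # assumed default service CIDR
  tasks:
    - debug:
        msg: >-
          dns_server={{ kube_service_addresses | ipaddr('net') | ipaddr(2) | ipaddr('address') }},
          skydns_server={{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}
      # expected output: dns_server=10.233.0.2, skydns_server=10.233.0.3
```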
@@ -1,29 +1,31 @@
-#[kube-master]
-#node1 ansible_ssh_host=10.99.0.26
-#node2 ansible_ssh_host=10.99.0.27
-#
-#[etcd]
-#node1 ansible_ssh_host=10.99.0.26
-#node2 ansible_ssh_host=10.99.0.27
-#node3 ansible_ssh_host=10.99.0.4
-#
-#[kube-node]
-#node2 ansible_ssh_host=10.99.0.27
-#node3 ansible_ssh_host=10.99.0.4
-#node4 ansible_ssh_host=10.99.0.5
-#node5 ansible_ssh_host=10.99.0.36
-#node6 ansible_ssh_host=10.99.0.37
-#
-#[paris]
-#node1 ansible_ssh_host=10.99.0.26
-#node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
-#node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
-#
-#[new-york]
-#node2 ansible_ssh_host=10.99.0.27
-#node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
-#node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
-#
-#[k8s-cluster:children]
-#kube-node
-#kube-master
+# ## Configure 'ip' variable to bind kubernetes services on a
+# ## different ip than the default iface
+# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
+# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
+# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
+# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
+# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
+# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
+
+# ## configure a bastion host if your nodes are not publicly reachable
+# bastion ansible_ssh_host=x.x.x.x
+
+# [kube-master]
+# node1
+# node2
+
+# [etcd]
+# node1
+# node2
+# node3
+
+# [kube-node]
+# node2
+# node3
+# node4
+# node5
+# node6
+
+# [k8s-cluster:children]
+# kube-node
+# kube-master
reset.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+
+- hosts: all
+  roles:
+    - { role: reset, tags: reset }
roles/bastion-ssh-config/tasks/main.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+---
+- set_fact:
+    has_bastion: "{{ 'bastion' in groups['all'] }}"
+
+- set_fact:
+    bastion_ip: "{{ hostvars['bastion']['ansible_ssh_host'] }}"
+  when: has_bastion
+
+# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly.
+# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_ssh_user in real_user.
+- set_fact:
+    real_user: "{{ ansible_ssh_user }}"
+  delegate_to: bastion
+  when: has_bastion
+
+- name: create ssh bastion conf
+  become: false
+  template: src=ssh-bastion.conf dest="{{ playbook_dir }}/ssh-bastion.conf"
roles/bastion-ssh-config/templates/ssh-bastion.conf (new file, 21 lines)
@@ -0,0 +1,21 @@
+{% if has_bastion %}
+{% set vars={'hosts': ''} %}
+{% set user='' %}
+
+{% for h in groups['all'] %}
+{% if h != 'bastion' %}
+{% if vars.update({'hosts': vars['hosts'] + ' ' + hostvars[h]['ansible_ssh_host']}) %}{% endif %}
+{% endif %}
+{% endfor %}
+
+Host {{ bastion_ip }}
+  Hostname {{ bastion_ip }}
+  StrictHostKeyChecking no
+  ControlMaster auto
+  ControlPath ~/.ssh/ansible-%r@%h:%p
+  ControlPersist 5m
+
+Host {{ vars['hosts'] }}
+  ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
+  StrictHostKeyChecking no
+{% endif %}
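The role above only renders ``ssh-bastion.conf`` next to the playbook; wiring it
into SSH is left to the inventory. A minimal sketch using Ansible's standard
``ansible_ssh_common_args`` behavioral variable (whether the repository itself
sets this is an assumption):

```yaml
# group_vars/all.yml (illustrative; assumes ansible-playbook is run from the
# directory that contains the generated ssh-bastion.conf)
ansible_ssh_common_args: "-F ssh-bastion.conf"
```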
roles/bootstrap-os/tasks/bootstrap-centos.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+
+- name: Check presence of fastestmirror.conf
+  stat: path=/etc/yum/pluginconf.d/fastestmirror.conf
+  register: fastestmirror
+
+# fastestmirror plugin actually slows down Ansible deployments
+- name: Disable fastestmirror plugin
+  lineinfile:
+    dest: /etc/yum/pluginconf.d/fastestmirror.conf
+    regexp: "^enabled=.*"
+    line: "enabled=0"
+    state: present
+  when: fastestmirror.stat.exists
@@ -2,8 +2,8 @@
 - name: Bootstrap | Check if bootstrap is needed
   raw: stat /opt/bin/.bootstrapped
   register: need_bootstrap
-  ignore_errors: True
+  failed_when: false
+  tags: facts

 - name: Bootstrap | Run bootstrap.sh
   script: bootstrap.sh

@@ -11,13 +11,15 @@

 - set_fact:
     ansible_python_interpreter: "/opt/bin/python"
+  tags: facts

 - name: Bootstrap | Check if we need to install pip
   shell: "{{ansible_python_interpreter}} -m pip --version"
   register: need_pip
-  ignore_errors: True
+  failed_when: false
   changed_when: false
   when: (need_bootstrap | failed)
+  tags: facts

 - name: Bootstrap | Copy get-pip.py
   copy: src=get-pip.py dest=~/get-pip.py
@@ -4,11 +4,13 @@
 - name: Bootstrap | Check if bootstrap is needed
   raw: which python
   register: need_bootstrap
-  ignore_errors: True
+  failed_when: false
+  tags: facts

 - name: Bootstrap | Install python 2.x
-  raw: DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
+  raw: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
   when: need_bootstrap | failed

 - set_fact:
     ansible_python_interpreter: "/usr/bin/python"
+  tags: facts
@@ -3,4 +3,9 @@
   when: bootstrap_os == "ubuntu"

 - include: bootstrap-coreos.yml
   when: bootstrap_os == "coreos"
+
+- include: bootstrap-centos.yml
+  when: bootstrap_os == "centos"
+
+- include: setup-pipelining.yml
roles/bootstrap-os/tasks/setup-pipelining.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+# Remove requiretty to make ssh pipelining work
+
+- name: Remove require tty
+  lineinfile: regexp="^\w+\s+requiretty" dest=/etc/sudoers state=absent
+
@@ -10,3 +10,16 @@
 # Max of 2 is allowed here (a 1 is reserved for the dns_server)
 #nameservers:
 # - 127.0.0.1
+
+# Versions
+dnsmasq_version: 2.72
+
+# Images
+dnsmasq_image_repo: "andyshinn/dnsmasq"
+dnsmasq_image_tag: "{{ dnsmasq_version }}"
+
+# Limits for dnsmasq/kubedns apps
+dns_cpu_limit: 100m
+dns_memory_limit: 170Mi
+dns_cpu_requests: 70m
+dns_memory_requests: 70Mi
@@ -1,34 +0,0 @@
-- name: Dnsmasq | restart network
-  command: /bin/true
-  notify:
-    - Dnsmasq | reload network
-    - Dnsmasq | update resolvconf
-  when: ansible_os_family != "CoreOS"
-
-- name: Dnsmasq | reload network
-  service:
-    name: >-
-      {% if ansible_os_family == "RedHat" -%}
-      network
-      {%- elif ansible_os_family == "Debian" -%}
-      networking
-      {%- endif %}
-    state: restarted
-  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"
-
-- name: Dnsmasq | update resolvconf
-  command: /bin/true
-  notify:
-    - Dnsmasq | reload resolvconf
-    - Dnsmasq | reload kubelet
-
-- name: Dnsmasq | reload resolvconf
-  command: /sbin/resolvconf -u
-  ignore_errors: true
-
-- name: Dnsmasq | reload kubelet
-  service:
-    name: kubelet
-    state: restarted
-  when: "{{ inventory_hostname in groups['kube-master'] }}"
-  ignore_errors: true
roles/dnsmasq/meta/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+dependencies:
+  - role: download
+    file: "{{ downloads.dnsmasq }}"
+    when: dns_mode == 'dnsmasq_kubedns' and download_localhost|default(false)
+    tags: [download, dnsmasq]
@@ -1,58 +0,0 @@
----
-- name: ensure dnsmasq.d directory exists
-  file:
-    path: /etc/dnsmasq.d
-    state: directory
-
-- name: ensure dnsmasq.d-available directory exists
-  file:
-    path: /etc/dnsmasq.d-available
-    state: directory
-
-- name: Write dnsmasq configuration
-  template:
-    src: 01-kube-dns.conf.j2
-    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
-    mode: 0755
-    backup: yes
-
-- name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
-  register: sym
-
-- name: Move previous configuration
-  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
-  changed_when: False
-  when: sym.stat.islnk is defined and sym.stat.islnk == False
-
-- name: Enable dnsmasq configuration
-  file:
-    src: /etc/dnsmasq.d-available/01-kube-dns.conf
-    dest: /etc/dnsmasq.d/01-kube-dns.conf
-    state: link
-
-- name: Create dnsmasq manifests
-  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
-  with_items:
-    - {file: dnsmasq-ds.yml, type: ds}
-    - {file: dnsmasq-svc.yml, type: svc}
-  register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Start Resources
-  kube:
-    name: dnsmasq
-    namespace: kube-system
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: /etc/kubernetes/{{item.item.file}}
-    state: "{{item.changed | ternary('latest','present') }}"
-  with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Check for dnsmasq port (pulling image and running container)
-  wait_for:
-    host: "{{dns_server}}"
-    port: 53
-    delay: 5
-  when: inventory_hostname == groups['kube-node'][0]
@@ -1,5 +1,61 @@
 ---
-- include: dnsmasq.yml
-  when: "{{ not skip_dnsmasq|bool }}"
-
-- include: resolvconf.yml
+- name: ensure dnsmasq.d directory exists
+  file:
+    path: /etc/dnsmasq.d
+    state: directory
+  tags: bootstrap-os
+
+- name: ensure dnsmasq.d-available directory exists
+  file:
+    path: /etc/dnsmasq.d-available
+    state: directory
+  tags: bootstrap-os
+
+- name: Write dnsmasq configuration
+  template:
+    src: 01-kube-dns.conf.j2
+    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
+    mode: 0755
+    backup: yes
+
+- name: Stat dnsmasq configuration
+  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
+  register: sym
+
+- name: Move previous configuration
+  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
+  changed_when: False
+  when: sym.stat.islnk is defined and sym.stat.islnk == False
+
+- name: Enable dnsmasq configuration
+  file:
+    src: /etc/dnsmasq.d-available/01-kube-dns.conf
+    dest: /etc/dnsmasq.d/01-kube-dns.conf
+    state: link
+
+- name: Create dnsmasq manifests
+  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
+  with_items:
+    - {file: dnsmasq-ds.yml, type: ds}
+    - {file: dnsmasq-svc.yml, type: svc}
+  register: manifests
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Start Resources
+  kube:
+    name: dnsmasq
+    namespace: "{{system_namespace}}"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Check for dnsmasq port (pulling image and running container)
+  wait_for:
+    host: "{{dns_server}}"
+    port: 53
+    delay: 5
+  when: inventory_hostname == groups['kube-node'][0]
+  tags: facts
Some files were not shown because too many files have changed in this diff.