Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 22:04:43 +03:00)

Compare commits: 403 commits
Only the abbreviated SHA1s of the 403 compared commits survived this extraction (the author, date, and message columns were empty in the capture); they are listed here in page order:

c9769965b8 52cee1f57f 056f4b6c00 3919d666c1 8c8d978cd8 dea4210da1 a6344f7561 c490e5c8a1 84052ff0b6 9ca374a88d
648aa7422d 41aefd131b 2e90d3fe76 4f33c6cfe6 f4e6fdc193 9d069d54d6 fb0ee9d84a 016b7893c6 1724772b20 a6a5d0e068
d548cb6ac2 d9641771ed aaa3f1c491 5889f7af0e 5579cddbdb 2b6866484e 34a27b0127 948d1d61ff c96a9bfdfd 4e80ac1cb3
550bda951e 6b27508c93 6684766c5f 5fd43b7cf0 336e2b8c84 ee69ac857e 6caf5b0ac3 0f461282c8 ab7c110880 5046466dae
0cc581b2da 7dde23e60b e4a48cf53b a3fe1e78df 5f2bb3319b 429b08a408 ec0317e0e4 613e3b65ac dfb9063b3f 284354c0da
82ee60fe8b 73a8c24089 d313be4420 83750d14e3 123532d2a4 1a05b5980f a3a772be7b 42a5055d3c a93639650f 71a230a4fa
0643ed968f 1572aaf6ca 5803de1ac5 13874f4610 341ea5a6ea 93be5afb60 5ed3916f82 7760f75ae0 390764c2b4 9926395e5b
422428908a 76c43f62e2 b69d5f6e6e 0db441b28f e3ebabc3b0 d0867c8d03 b46458a18f 3ae29d763e 125cb0aa64 783871a253
8294a9f1db ef43b21597 6fdcaa1a63 d47a2d03b4 739cf59953 2e386dfbdc ccbb2ee3ae eb78ce4c4e 6084e05a6b da8a604c4c
df2b2d7417 d87b0344b5 2606e8e1c8 b62de1dcb1 37057ba969 b58512bbda c90045c506 8b91a43576 602dba45ea d240073f65
69f09e0f18 cca26ae3d7 1c1894cdd3 26a0406669 a746d63177 0fc5e70c18 b74c2f89f0 7ac7fc33a7 9339903a85 33c8d0a1a7
5488571108 28fbfbbbe7 18cdab3671 311baeed5d f4d4d490af 256a4e1f29 c50c6672f3 1345dd07f7 e83010b739 0dbde9e923
d4193bbd22 b92404fd0a 9f01331595 82076f90a3 e165bb19a0 8168689caa c71b078c8e caa8efbf86 bcec5553c5 9ac744779c
4e76bced53 60f263b629 ea57ce7514 439a2e2678 346eca5748 643b28f9d3 1938c96239 5dc8f5229f f35e5e864f 47b4242613
92c4428cfd d97673c13f f61071312a 234608433e 36b6ae9a3c 977f82c32c 1f6dce61ba 6f07da9f41 ac7cc4b7d1 d591b59205
c6f2102073 612266f3e5 5fbfa1481e 430a87d887 0c953101ff 07c144d8a6 298ab8e89e 8812be1e14 4268996680 34232a170a
0fa90ec9e8 f073ee91ea cf502735e9 252a30aee8 677c4c4cb6 6a457720a4 f2de250b10 6cb9bd2619 e727bd52f1 d2c57142d3
9be099466d acae5d4286 637eabccce e6cfbe42db 15aec7cd87 b5d3f9b2fe e38258381f e8a1c7a53f 5bf9b5345e 2af71f31b4
4662b41de6 ff5a48c9f9 4fb4ac120b c7fef6cb76 6a7308d5c7 4419662fa0 b91f8630a3 5668e5f767 aa0d7ea5d0 c52c5f5056
90fc407420 9fb391fed5 fbc55da2bf 1b1f5f22d4 66da43bbbc 731d32afda b4688701ea af4c41f32e d0a1e15ef3 a4da0e4ee2
7d816aecf1 a63b05efbc 7f212ca9cb 296eccd238 f94eb0b997 a70c3b661e 0f246bfba4 8141b72d5e 277c5d74cc 7a86b6c73e
52a85d5757 a76e5dbb11 c3e5aac18e 10b38ab9ff 32cd6e99b2 a2540e3318 0b874e8db2 192136df20 ab8fdba484 342e6d6823
dfe7bfd127 51f55f3748 a709cd9aa1 d4dfdf68a6 a5c21ab2e8 c1690c91c2 e8195b65e4 c9cff5c845 da20d9eda4 a2bdcabc33
1e8ee99d1a a07260959d 5fdea4b947 83da5d7657 1761f9f891 b3282cd0bb 65ece3bc1d e2d6b92370 bcd912e854 8251781efb
3b7eaf66b6 1d148e9755 d84ed1b4b3 baf80b7d7e 9777b3c177 d2151500b6 e101b72a72 b847a43c61 19f5093034 585102ee20
ee7ac22f0d 0b67c23d42 f1ba247844 2fa7ee0cf9 40fbb3691d d9b1435621 72ab34f210 67ca186dd1 85fa3efc06 8531ec9186
8c3f5f0831 c4beee38f6 247a1a6e6e a4396cfca0 53b72920a5 536454b079 95bb8075f5 708d2fbd61 103f09b470 c4c312c2e6
d7babeba2e 9e59c74c24 d94253ff6a dc90c594c6 094c2c75f3 9ddace0566 47061a31e2 33d897bcb6 bf94d6f45e 1556d1c63e
c2093b128d 153b82a803 587c8f4701 922c6897d1 eb6025a184 c43f9bc705 cd2847c1b9 309d6a49b6 8281b98e19 0e99cbb0ad
7c7adc7198 c1ebd84bf0 26aeebb372 c7de2a524b e924504928 63908108b2 9bc5da9780 4a7d8c6fea 722aacb633 ab0581e114
68808534b3 0500f27db8 cb92b30c25 67147cf435 96a2439c38 e8f97aa437 87757d4fcf 33de89b69f 9e86f1672b 28aade3e06
35276de37e 492218a3e1 a740e521d2 bdc183114a 7de87d958e ffce277c0c c226b4e5cb 094f4d02b8 ba615ff94e 5240465f39
ef6a59bbd3 cd123f7f24 0984b23f0e d9dca20d7f d8bebcd201 f576d70b3c ae5ff890d4 24ee97d558 f949bfd46c 242e96d251
66d9a6ebbc 4e28f1de4e 9b8a757526 a894a8c7bc 962155e463 c90c981bb2 04fe83daa0 50d0ab2944 608e7dfab2 c6e3a8dbbd
1884d89d3b ed95f9ab81 9f8466a186 8c869a2e3e 743ad0eb5c 5253b3ec13 ebf8231c9a adceaf60e1 96c63cc0b6 5f2fa6d76f
bd064e8094 8f4e879ca7 a19ab91b1b 4f627baf71 3914d51a7e bd6c12d686 9135c134ec 59d71740b1 078d27c0f1 180f2d1fde
391b155a98 47982ea21c d0e31f9457 7803ae6acb 97de82bbcc bd1d49fabd 928bbeaf0f 343a26434d 107da007b1 fb980e4542
f12ad6a56f 5691086ba2 831a54e9b7 fd64f4d2a0 3cd89bed45 5b2568adf1 48a85ce8f8 936927a54f 8418daa544 5c22133492
e69b9f6dcb b03093be73 bc44d5deb3 8ab86ac49d 850b7466cd 652cbedee5 bf96b92def ab21f4d169 64a39fdb86 2192bcccbd
7237a925eb 34ed6e1a08 8cbdf73eba 624a964cda a14dfe74e1 f2e822810a a192111e6a 4271dd6645 457ed11b49 9f8da6c225
ed9a521d6d 68fafd030d edcd5bf67f
.gitignore (vendored, new file, 5 lines):

.vagrant
*.retry
inventory/vagrant_ansible_inventory
temp
.idea
.travis.yml (19 lines changed):

@@ -58,18 +58,18 @@ env:
     CLOUD_IMAGE=rhel-7-sudo
     CLOUD_REGION=europe-west1-b
 
-  # Ubuntu 14.04
+  # Ubuntu 16.04
   - >-
     KUBE_NETWORK_PLUGIN=flannel
-    CLOUD_IMAGE=ubuntu-1404-trusty
+    CLOUD_IMAGE=ubuntu-1604-xenial
     CLOUD_REGION=us-central1-c
   - >-
     KUBE_NETWORK_PLUGIN=calico
-    CLOUD_IMAGE=ubuntu-1404-trusty
+    CLOUD_IMAGE=ubuntu-1604-xenial
     CLOUD_REGION=us-east1-d
   - >-
     KUBE_NETWORK_PLUGIN=weave
-    CLOUD_IMAGE=ubuntu-1404-trusty
+    CLOUD_IMAGE=ubuntu-1604-xenial
     CLOUD_REGION=asia-east1-c
 
   # Ubuntu 15.10
@@ -87,11 +87,6 @@ env:
     CLOUD_REGION=us-east1-d
 
-
-matrix:
-  allow_failures:
-    - env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=ubuntu-1404-trusty CLOUD_REGION=asia-east1-c
-    - env: KUBE_NETWORK_PLUGIN=calico CLOUD_IMAGE=ubuntu-1404-trusty CLOUD_REGION=us-east1-d
 
 before_install:
   # Install Ansible.
   - pip install --user boto -U
@@ -114,6 +109,8 @@ before_script:
   - $HOME/.local/bin/ansible-playbook --version
   - cp tests/ansible.cfg .
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
+  ## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
+  # - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml
 
 script:
   - >
@@ -128,7 +125,7 @@ script:
     -e cloud_region=${CLOUD_REGION}
 
   # Create cluster
-  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
+  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
   # Tests Cases
   ## Test Master API
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
@@ -136,6 +133,8 @@ script:
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
   ## Ping the between 2 pod
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+  ## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
+  # - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml
 
 after_script:
   - >
CONTRIBUTING.md (new file, 10 lines):

# Contributing guidelines

## How to become a contributor and submit your own code

### Contributing A Patch

1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Submit a pull request.
OWNERS (new file, 6 lines):

# See the OWNERS file documentation:
# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md

owners:
  - Smana
  - ant31
README.md (87 lines changed):

@@ -1,6 +1,87 @@
 
 
-The documentation can be found [THERE](https://docs.kubespray.io)
+##Deploy a production ready kubernetes cluster
 
-[](https://travis-ci.org/kubespray/kubespray)
+If you have questions, you can [invite yourself](https://slack.kubespray.io/) to **chat** with us on Slack! [](https://kubespray.slack.com)
+
+- Can be deployed on **AWS, GCE, OpenStack or Baremetal**
+- **High available** cluster
+- **Composable** (Choice of the network plugin for instance)
+- Support most popular **Linux distributions**
+- **Continuous integration tests**
+
+
+To deploy the cluster you can use :
+
+[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
+**Ansible** usual commands <br>
+**vagrant** by simply running `vagrant up` (for tests purposes) <br>
+
+
+* [Requirements](#requirements)
+* [Getting started](docs/getting-started.md)
+* [Vagrant install](docs/vagrant.md)
+* [CoreOS bootstrap](docs/coreos.md)
+* [Ansible variables](docs/ansible.md)
+* [Cloud providers](docs/cloud.md)
+* [OpenStack](docs/openstack.md)
+* [AWS](docs/aws.md)
+* [Network plugins](#network-plugins)
+* [Roadmap](docs/roadmap.md)
+
+Supported Linux distributions
+===============
+
+* **CoreOS**
+* **Debian** Wheezy, Jessie
+* **Ubuntu** 14.10, 15.04, 15.10, 16.04
+* **Fedora** 23
+* **CentOS/RHEL** 7
+
+Versions
+--------------
+
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.0 <br>
+[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
+[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
+[weave](http://weave.works/) v1.6.1 <br>
+[docker](https://www.docker.com/) v1.10.3 <br>
+
+
+Requirements
+--------------
+
+* The target servers must have **access to the Internet** in order to pull docker images.
+* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
+in order to avoid any issue during deployment you should disable your firewall
+* **Copy your ssh keys** to all the servers part of your inventory.
+* **Ansible v2.x and python-netaddr**
+
+
+## Network plugins
+You can choose between 3 network plugins. (default: `flannel` with vxlan backend)
+
+* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
+
+* [**calico**](docs/calico.md): bgp (layer 3) networking.
+
+* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
+(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))
+
+The choice is defined with the variable `kube_network_plugin`
+
+
+## CI Tests
+
+[](https://travis-ci.org/kubespray/kargo) </br>
+
+### Google Compute Engine
+
+| Calico | Flannel | Weave |
+------------- | ------------- | ------------- | ------------- |
+Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
+CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
+CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|
+
+CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
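The network plugin described in the README above is selected through the ordinary Ansible variable `kube_network_plugin`, so it can also be set at deploy time instead of in group_vars; a minimal sketch (the inventory path follows the .travis.yml example and is illustrative):

```
ansible-playbook -i inventory/inventory.ini -b -e kube_network_plugin=calico cluster.yml
```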
RELEASE.md (new file, 9 lines):

# Release Process

The Kargo Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release
2. At least on of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
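Step 3 above, spelled out as shell; the version value is illustrative, and note that pushing a single tag is conventionally written with an explicit remote:

```
VERSION=v1.0.0               # illustrative
git tag -s $VERSION          # opens an editor; paste the changelog into the tag message
git push origin $VERSION     # conventional form of the "git push $VERSION" in step 3
```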
Vagrantfile (vendored, new file, 115 lines; indentation restored to conventional Ruby style):

# -*- mode: ruby -*-
# # vi: set ft=ruby :

require 'fileutils'

Vagrant.require_version ">= 1.8.0"

CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")

# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
$vm_gui = false
$vm_memory = 1536
$vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$box = "bento/ubuntu-14.04"

host_vars = {}

if File.exist?(CONFIG)
  require CONFIG
end

# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory

# if $inventory has a hosts file use it, otherwise copy over vars etc
# to where vagrant expects dynamic inventory to be.
if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant",
                               "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
    FileUtils.ln_s($inventory, $vagrant_ansible)
  end
end

Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false
  config.vm.box = $box

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
      config.vm.hostname = vm_name

      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
      end

      ["vmware_fusion", "vmware_workstation"].each do |vmware|
        config.vm.provider vmware do |v|
          v.vmx['memsize'] = $vm_memory
          v.vmx['numvcpus'] = $vm_cpus
        end
      end

      config.vm.provider :virtualbox do |vb|
        vb.gui = $vm_gui
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
      end

      ip = "#{$subnet}.#{i+100}"
      host_vars[vm_name] = {
        "ip" => ip,
        #"access_ip" => ip,
        "flannel_interface" => ip,
        "flannel_backend_type" => "host-gw",
        "local_release_dir" => "/vagrant/temp",
        "download_run_once" => "True"
      }
      config.vm.network :private_network, ip: ip

      # Only execute once the Ansible provisioner,
      # when all the machines are up and ready.
      if i == $num_instances
        config.vm.provision "ansible" do |ansible|
          ansible.playbook = "cluster.yml"
          if File.exist?(File.join(File.dirname($inventory), "hosts"))
            ansible.inventory_path = $inventory
          end
          ansible.sudo = true
          ansible.limit = "all"
          ansible.host_key_checking = false
          ansible.raw_arguments = ["--forks=#{$num_instances}"]
          ansible.host_vars = host_vars
          #ansible.tags = ['download']
          ansible.groups = {
            # The first three nodes should be etcd servers
            "etcd" => ["#{$instance_name_prefix}-0[1:3]"],
            # The first two nodes should be masters
            "kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
            # all nodes should be kube nodes
            "kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
            "k8s-cluster:children" => ["kube-master", "kube-node"],
          }
        end
      end

    end
  end
end
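The `$...` defaults above are only fallbacks; the Vagrantfile loads `vagrant/config.rb` when it exists, so per-user overrides go there. A minimal sketch (all values illustrative, not project defaults):

```
# Create a hypothetical vagrant/config.rb overriding the Vagrantfile defaults,
# then bring the cluster up.
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$num_instances = 5
$vm_memory = 2048
$vm_cpus = 2
$box = "bento/ubuntu-16.04"
EOF
vagrant up
```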
cluster.yml (26 lines changed; YAML indentation restored to conventional Ansible style):

@@ -1,18 +1,36 @@
 ---
-- hosts: k8s-cluster
+- hosts: all
+  gather_facts: false
+  roles:
+    - bootstrap-os
+  tags:
+    - bootstrap-os
+
+
+- hosts: all
+  gather_facts: true
+
+- hosts: etcd:!k8s-cluster
+  roles:
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: etcd, tags: etcd }
+
+- hosts: k8s-cluster
   roles:
-    - { role: adduser, tags: adduser }
-    - { role: download, tags: download }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: etcd, tags: etcd }
-    - { role: docker, tags: docker, when: ansible_os_family != "CoreOS" }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master
   roles:
+    - { role: kubernetes/preinstall, tags: preinstall }
     - { role: kubernetes/master, tags: master }
 
 - hosts: k8s-cluster
   roles:
     - { role: dnsmasq, tags: dnsmasq }
+
+- hosts: kube-master[0]
+  roles:
+    - { role: kubernetes-apps, tags: apps }
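Because every play above tags its roles, parts of the deployment can be run in isolation; a sketch (the inventory path is illustrative):

```
# Re-run only the etcd role across the relevant hosts
ansible-playbook -i inventory/inventory.ini -b --tags etcd cluster.yml

# Re-run just the network plugin configuration
ansible-playbook -i inventory/inventory.ini -b --tags network cluster.yml
```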
code-of-conduct.md (new file, 59 lines):

## Kubernetes Community Code of Conduct

### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/

### Kubernetes Events Code of Conduct

Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.

While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.

The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.

Please bring any concerns to to the immediate attention of Kubernetes event staff

[]()
contrib/terraform/aws/.gitignore (vendored, new file, 2 lines):

*.tfstate*
inventory
contrib/terraform/aws/00-create-infrastructure.tf (new executable file, 261 lines; HCL indentation restored):

variable "deploymentName" {
  type = "string"
  description = "The desired name of your deployment."
}

variable "numControllers"{
  type = "string"
  description = "Desired # of controllers."
}

variable "numEtcd" {
  type = "string"
  description = "Desired # of etcd nodes. Should be an odd number."
}

variable "numNodes" {
  type = "string"
  description = "Desired # of nodes."
}

variable "volSizeController" {
  type = "string"
  description = "Volume size for the controllers (GB)."
}

variable "volSizeEtcd" {
  type = "string"
  description = "Volume size for etcd (GB)."
}

variable "volSizeNodes" {
  type = "string"
  description = "Volume size for nodes (GB)."
}

variable "subnet" {
  type = "string"
  description = "The subnet in which to put your cluster."
}

variable "securityGroups" {
  type = "string"
  description = "The sec. groups in which to put your cluster."
}

variable "ami"{
  type = "string"
  description = "AMI to use for all VMs in cluster."
}

variable "SSHKey" {
  type = "string"
  description = "SSH key to use for VMs."
}

variable "master_instance_type" {
  type = "string"
  description = "Size of VM to use for masters."
}

variable "etcd_instance_type" {
  type = "string"
  description = "Size of VM to use for etcd."
}

variable "node_instance_type" {
  type = "string"
  description = "Size of VM to use for nodes."
}

variable "terminate_protect" {
  type = "string"
  default = "false"
}

variable "awsRegion" {
  type = "string"
}

provider "aws" {
  region = "${var.awsRegion}"
}

variable "iam_prefix" {
  type = "string"
  description = "Prefix name for IAM profiles"
}

resource "aws_iam_instance_profile" "kubernetes_master_profile" {
  name = "${var.iam_prefix}_kubernetes_master_profile"
  roles = ["${aws_iam_role.kubernetes_master_role.name}"]
}

resource "aws_iam_role" "kubernetes_master_role" {
  name = "${var.iam_prefix}_kubernetes_master_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_master_policy" {
  name = "${var.iam_prefix}_kubernetes_master_policy"
  role = "${aws_iam_role.kubernetes_master_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_instance_profile" "kubernetes_node_profile" {
  name = "${var.iam_prefix}_kubernetes_node_profile"
  roles = ["${aws_iam_role.kubernetes_node_role.name}"]
}

resource "aws_iam_role" "kubernetes_node_role" {
  name = "${var.iam_prefix}_kubernetes_node_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_node_policy" {
  name = "${var.iam_prefix}_kubernetes_node_policy"
  role = "${aws_iam_role.kubernetes_node_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_instance" "master" {
  count = "${var.numControllers}"
  ami = "${var.ami}"
  instance_type = "${var.master_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeController}"
  }
  tags {
    Name = "${var.deploymentName}-master-${count.index + 1}"
  }
}

resource "aws_instance" "etcd" {
  count = "${var.numEtcd}"
  ami = "${var.ami}"
  instance_type = "${var.etcd_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  root_block_device {
    volume_size = "${var.volSizeEtcd}"
  }
  tags {
    Name = "${var.deploymentName}-etcd-${count.index + 1}"
  }
}


resource "aws_instance" "minion" {
  count = "${var.numNodes}"
  ami = "${var.ami}"
  instance_type = "${var.node_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeNodes}"
  }
  tags {
    Name = "${var.deploymentName}-minion-${count.index + 1}"
  }
}

output "kubernetes_master_profile" {
  value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
}

output "kubernetes_node_profile" {
  value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
}

output "master-ip" {
  value = "${join(", ", aws_instance.master.*.private_ip)}"
}

output "etcd-ip" {
  value = "${join(", ", aws_instance.etcd.*.private_ip)}"
}

output "minion-ip" {
  value = "${join(", ", aws_instance.minion.*.private_ip)}"
}
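Every variable declared above can also be supplied ad hoc on the Terraform command line instead of through terraform.tfvars; a sketch with illustrative values (Terraform of this era accepted a directory argument):

```
terraform plan \
  -var 'numControllers=2' -var 'numEtcd=3' -var 'numNodes=4' \
  -var 'awsRegion=us-west-2' \
  contrib/terraform/aws
```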
contrib/terraform/aws/01-create-inventory.tf (new executable file, 37 lines; HCL indentation restored):

variable "SSHUser" {
  type = "string"
  description = "SSH User for VMs."
}

resource "null_resource" "ansible-provision" {

  depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]

  ##Create Master Inventory
  provisioner "local-exec" {
    command = "echo \"[kube-master]\" > inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create ETCD Inventory
  provisioner "local-exec" {
    command = "echo \"\n[etcd]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create Nodes Inventory
  provisioner "local-exec" {
    command = "echo \"\n[kube-node]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  provisioner "local-exec" {
    command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\netcd\" >> inventory"
  }
}
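The local-exec chain above writes a flat Ansible inventory next to the Terraform files. With the counts from terraform.tfvars it would look roughly like this (all IPs are illustrative):

```
$ cat inventory
# [kube-master]
# 10.0.0.10 ansible_ssh_user=core
# 10.0.0.11 ansible_ssh_user=core
#
# [etcd]
# 10.0.0.20 ansible_ssh_user=core
# 10.0.0.21 ansible_ssh_user=core
# 10.0.0.22 ansible_ssh_user=core
#
# [kube-node]
# 10.0.0.30 ansible_ssh_user=core
# 10.0.0.31 ansible_ssh_user=core
#
# [k8s-cluster:children]
# kube-node
# kube-master
# etcd
```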
contrib/terraform/aws/README.md (new file, 28 lines):

## Kubernetes on AWS with Terraform

**Overview:**

- This will create nodes in a VPC inside of AWS

- A dynamic number of masters, etcd, and nodes can be created

- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. Will be looking into how to work around this later.

**How to Use:**

- Export the variables for your Amazon credentials:

```
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
```

- Update contrib/terraform/aws/terraform.tfvars with your data

- Run with `terraform apply`

- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.

**Future Work:**

- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now, using terraform.py or something may make sense in the future.
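Putting the steps of that README together, an end-to-end run might look like the following sketch; the SSH user matches the `SSHUser="core"` in terraform.tfvars, but the exact playbook flags depend on your AMI and setup:

```
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
terraform apply contrib/terraform/aws
ansible-playbook -i contrib/terraform/aws/inventory -u core -b cluster.yml
```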
contrib/terraform/aws/terraform.tfvars (new file, 22 lines):

deploymentName="test-kube-deploy"

numControllers="2"
numEtcd="3"
numNodes="2"

volSizeController="20"
volSizeEtcd="20"
volSizeNodes="20"

awsRegion="us-west-2"
subnet="subnet-xxxxx"
ami="ami-32a85152"
securityGroups="sg-xxxxx"
SSHUser="core"
SSHKey="my-key"

master_instance_type="m3.xlarge"
etcd_instance_type="m3.xlarge"
node_instance_type="m3.xlarge"

terminate_protect="false"
contrib/terraform/openstack/README.md (new file, 137 lines):

# Kubernetes on Openstack with Terraform

Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
Openstack.

## Status

This will install a Kubernetes cluster on an Openstack Cloud. It is tested on a
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and
should work on most modern installs of OpenStack that support the basic
services.

There are some assumptions made to try and ensure it will work on your openstack cluster.

* floating-ips are used for access
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled


## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)

## Terraform

Terraform will be used to provision all of the OpenStack resources required to
run Docker Swarm. It is also used to deploy and provision the software
requirements.

### Prep

#### OpenStack

Ensure your OpenStack credentials are loaded in environment variables. This is
how I do it:

```
$ source ~/.stackrc
```

You will need two networks before installing, an internal network and
an external (floating IP Pool) network. The internet network can be shared as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs the Terraform does not attempt to create
these for you.

By default Terraform will expect that your networks are called `internal` and
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`.

A full list of variables you can change can be found at [variables.tf](variables.tf).

All OpenStack resources will use the Terraform variable `cluster_name` (
default `example`) in their name to make it easier to track. For example the
first compute resource will be named `example-kubernetes-1`.

#### Terraform

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

Ensure that you have your Openstack credentials loaded into Terraform
environment variables. Likely via a command similar to:

```
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

# Provision a Kubernetes Cluster on OpenStack

```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
openstack_compute_secgroup_v2.k8s_master: Creating...
  description: "" => "example - Kubernetes Master"
  name:        "" => "example-k8s-master"
  rule.#:      "" => "<computed>"
...
...
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.

The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
infrastructure, so keep it safe. To inspect the complete state
use the `terraform show` command.

State path: contrib/terraform/openstack/terraform.tfstate
```

Make sure you can connect to the hosts:

```
$ ansible -i contrib/terraform/openstack/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-etcd-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

if it fails try to connect manually via SSH ... it could be somthing as simple as a stale host key.

Deploy kubernetes:

```
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```

# clean up:

```
$ terraform destroy
Do you really want to destroy?
  Terraform will delete all your managed infrastructure.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
...
...
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
```
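If your networks are not literally named `internal` and `external`, the `network_name` and `floatingip_pool` variables mentioned in that README can be overridden the same way as the credentials; a sketch with illustrative values:

```
export TF_VAR_network_name="my-internal-net"
export TF_VAR_floatingip_pool="public"
export TF_VAR_cluster_name="staging"
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
```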
contrib/terraform/openstack/group_vars/all.yml (new file, 136 lines; YAML indentation restored):

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local

# For some environments, each node has a pubilcally accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is use to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node for example. The access_ip is really useful AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With calico it is possible to distributed routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)

# Internal DNS configuration.
# Kubernetes can create and mainatain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Upstream dns servers used by dnsmasq
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

# There are some changes specific to the cloud providers
# for instance we need to encapsulate packets with some network plugins
# If set the possible values are either 'gce', 'aws' or 'openstack'
# When openstack is used make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# cloud_provider:

# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy :
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"

## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
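The three `ipaddr()` expressions above simply pick fixed offsets inside `kube_service_addresses`; a quick sanity check with Python's stdlib, assuming the default 10.233.0.0/18:

```
python -c "
import ipaddress
net = ipaddress.ip_network('10.233.0.0/18')
print(net[1])  # kube_apiserver_ip -> 10.233.0.1
print(net[2])  # dns_server        -> 10.233.0.2
print(net[3])  # skydns_server     -> 10.233.0.3
"
```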
contrib/terraform/openstack/hosts (new symbolic link):

../terraform.py
contrib/terraform/openstack/kubespray.tf (new file, 94 lines; HCL indentation restored):

resource "openstack_networking_floatingip_v2" "k8s_master" {
  count = "${var.number_of_k8s_masters}"
  pool = "${var.floatingip_pool}"
}

resource "openstack_networking_floatingip_v2" "k8s_node" {
  count = "${var.number_of_k8s_nodes}"
  pool = "${var.floatingip_pool}"
}


resource "openstack_compute_keypair_v2" "k8s" {
  name = "kubernetes-${var.cluster_name}"
  public_key = "${file(var.public_key_path)}"
}

resource "openstack_compute_secgroup_v2" "k8s_master" {
  name = "${var.cluster_name}-k8s-master"
  description = "${var.cluster_name} - Kubernetes Master"
}

resource "openstack_compute_secgroup_v2" "k8s" {
  name = "${var.cluster_name}-k8s"
  description = "${var.cluster_name} - Kubernetes"
  rule {
    ip_protocol = "tcp"
    from_port = "22"
    to_port = "22"
    cidr = "0.0.0.0/0"
  }
  rule {
    ip_protocol = "icmp"
    from_port = "-1"
    to_port = "-1"
    cidr = "0.0.0.0/0"
  }
  rule {
    ip_protocol = "tcp"
    from_port = "1"
    to_port = "65535"
    self = true
  }
  rule {
    ip_protocol = "udp"
    from_port = "1"
    to_port = "65535"
    self = true
  }
  rule {
    ip_protocol = "icmp"
    from_port = "-1"
    to_port = "-1"
    self = true
  }
}

resource "openstack_compute_instance_v2" "k8s_master" {
  name = "${var.cluster_name}-k8s-master-${count.index+1}"
  count = "${var.number_of_k8s_masters}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_master}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"
  network {
    name = "${var.network_name}"
  }
  security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
                      "${openstack_compute_secgroup_v2.k8s.name}" ]
  floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
  }
}

resource "openstack_compute_instance_v2" "k8s_node" {
  name = "${var.cluster_name}-k8s-node-${count.index+1}"
  count = "${var.number_of_k8s_nodes}"
  image_name = "${var.image}"
  flavor_id = "${var.flavor_k8s_node}"
  key_pair = "${openstack_compute_keypair_v2.k8s.name}"
  network {
    name = "${var.network_name}"
  }
  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
  floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster"
  }
}

#output "msg" {
#  value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
#}
238  contrib/terraform/openstack/terraform.tfstate  Normal file
@@ -0,0 +1,238 @@
{
    "version": 1,
    "serial": 17,
    "modules": [
        {
            "path": [
                "root"
            ],
            "outputs": {},
            "resources": {
                "openstack_compute_instance_v2.k8s_master.0": {
                    "type": "openstack_compute_instance_v2",
                    "depends_on": [
                        "openstack_compute_keypair_v2.k8s",
                        "openstack_compute_secgroup_v2.k8s",
                        "openstack_compute_secgroup_v2.k8s_master",
                        "openstack_networking_floatingip_v2.k8s_master"
                    ],
                    "primary": {
                        "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
                        "attributes": {
                            "access_ip_v4": "173.247.105.12",
                            "access_ip_v6": "",
                            "flavor_id": "3",
                            "flavor_name": "m1.medium",
                            "floating_ip": "173.247.105.12",
                            "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
                            "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
                            "image_name": "ubuntu-14.04",
                            "key_pair": "kubernetes-example",
                            "metadata.#": "2",
                            "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
                            "metadata.ssh_user": "ubuntu",
                            "name": "example-k8s-master-1",
                            "network.#": "1",
                            "network.0.access_network": "false",
                            "network.0.fixed_ip_v4": "10.230.7.86",
                            "network.0.fixed_ip_v6": "",
                            "network.0.floating_ip": "173.247.105.12",
                            "network.0.mac": "fa:16:3e:fb:82:1d",
                            "network.0.name": "internal",
                            "network.0.port": "",
                            "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
                            "security_groups.#": "2",
                            "security_groups.2779334175": "example-k8s",
                            "security_groups.3772290257": "example-k8s-master",
                            "volume.#": "0"
                        }
                    }
                },
                "openstack_compute_instance_v2.k8s_master.1": {
                    "type": "openstack_compute_instance_v2",
                    "depends_on": [
                        "openstack_compute_keypair_v2.k8s",
                        "openstack_compute_secgroup_v2.k8s",
                        "openstack_compute_secgroup_v2.k8s_master",
                        "openstack_networking_floatingip_v2.k8s_master"
                    ],
                    "primary": {
                        "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
                        "attributes": {
                            "access_ip_v4": "173.247.105.70",
                            "access_ip_v6": "",
                            "flavor_id": "3",
                            "flavor_name": "m1.medium",
                            "floating_ip": "173.247.105.70",
                            "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
                            "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
                            "image_name": "ubuntu-14.04",
                            "key_pair": "kubernetes-example",
                            "metadata.#": "2",
                            "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
                            "metadata.ssh_user": "ubuntu",
                            "name": "example-k8s-master-2",
                            "network.#": "1",
                            "network.0.access_network": "false",
                            "network.0.fixed_ip_v4": "10.230.7.85",
                            "network.0.fixed_ip_v6": "",
                            "network.0.floating_ip": "173.247.105.70",
                            "network.0.mac": "fa:16:3e:33:98:e6",
                            "network.0.name": "internal",
                            "network.0.port": "",
                            "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
                            "security_groups.#": "2",
                            "security_groups.2779334175": "example-k8s",
                            "security_groups.3772290257": "example-k8s-master",
                            "volume.#": "0"
                        }
                    }
                },
                "openstack_compute_instance_v2.k8s_node": {
                    "type": "openstack_compute_instance_v2",
                    "depends_on": [
                        "openstack_compute_keypair_v2.k8s",
                        "openstack_compute_secgroup_v2.k8s",
                        "openstack_networking_floatingip_v2.k8s_node"
                    ],
                    "primary": {
                        "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
                        "attributes": {
                            "access_ip_v4": "173.247.105.76",
                            "access_ip_v6": "",
                            "flavor_id": "3",
                            "flavor_name": "m1.medium",
                            "floating_ip": "173.247.105.76",
                            "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
                            "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
                            "image_name": "ubuntu-14.04",
                            "key_pair": "kubernetes-example",
                            "metadata.#": "2",
                            "metadata.kubespray_groups": "kube-node,k8s-cluster",
                            "metadata.ssh_user": "ubuntu",
                            "name": "example-k8s-node-1",
                            "network.#": "1",
                            "network.0.access_network": "false",
                            "network.0.fixed_ip_v4": "10.230.7.84",
                            "network.0.fixed_ip_v6": "",
                            "network.0.floating_ip": "173.247.105.76",
                            "network.0.mac": "fa:16:3e:53:57:bc",
                            "network.0.name": "internal",
                            "network.0.port": "",
                            "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
                            "security_groups.#": "1",
                            "security_groups.2779334175": "example-k8s",
                            "volume.#": "0"
                        }
                    }
                },
                "openstack_compute_keypair_v2.k8s": {
                    "type": "openstack_compute_keypair_v2",
                    "primary": {
                        "id": "kubernetes-example",
                        "attributes": {
                            "id": "kubernetes-example",
                            "name": "kubernetes-example",
                            "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9nU6RPYCabjLH1LvJfpp9L8r8q5RZ6niS92zD95xpm2b2obVydWe0tCSFdmULBuvT8Q8YQ4qOG2g/oJlsGOsia+4CQjYEUV9CgTH9H5HK3vUOwtO5g2eFnYKSmI/4znHa0WYpQFnQK2kSSeCs2beTlJhc8vjfN/2HHmuny6SxNSbnCk/nZdwamxEONIVdjlm3CSBlq4PChT/D/uUqm/nOm0Zqdk9ZlTBkucsjiOCJeEzg4HioKmIH8ewqsKuS7kMADHPH98JMdBhTKbYbLrxTC/RfiaON58WJpmdOA935TT5Td5aVQZoqe/i/5yFRp5fMG239jtfbM0Igu44TEIib pczarkowski@Pauls-MacBook-Pro.local\n"
                        }
                    }
                },
                "openstack_compute_secgroup_v2.k8s": {
                    "type": "openstack_compute_secgroup_v2",
                    "primary": {
                        "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
                        "attributes": {
                            "description": "example - Kubernetes",
                            "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
                            "name": "example-k8s",
                            "rule.#": "5",
                            "rule.112275015.cidr": "",
                            "rule.112275015.from_group_id": "",
                            "rule.112275015.from_port": "1",
                            "rule.112275015.id": "597170c9-b35a-45c0-8717-652a342f3fd6",
                            "rule.112275015.ip_protocol": "tcp",
                            "rule.112275015.self": "true",
                            "rule.112275015.to_port": "65535",
                            "rule.2180185248.cidr": "0.0.0.0/0",
                            "rule.2180185248.from_group_id": "",
                            "rule.2180185248.from_port": "-1",
                            "rule.2180185248.id": "ffdcdd5e-f18b-4537-b502-8849affdfed9",
                            "rule.2180185248.ip_protocol": "icmp",
                            "rule.2180185248.self": "false",
                            "rule.2180185248.to_port": "-1",
                            "rule.3267409695.cidr": "",
                            "rule.3267409695.from_group_id": "",
                            "rule.3267409695.from_port": "-1",
                            "rule.3267409695.id": "4f91d9ca-940c-4f4d-9ce1-024cbd7d9c54",
                            "rule.3267409695.ip_protocol": "icmp",
                            "rule.3267409695.self": "true",
                            "rule.3267409695.to_port": "-1",
                            "rule.635693822.cidr": "",
                            "rule.635693822.from_group_id": "",
                            "rule.635693822.from_port": "1",
                            "rule.635693822.id": "c6816e5b-a1a4-4071-acce-d09b92d14d49",
                            "rule.635693822.ip_protocol": "udp",
                            "rule.635693822.self": "true",
                            "rule.635693822.to_port": "65535",
                            "rule.836640770.cidr": "0.0.0.0/0",
                            "rule.836640770.from_group_id": "",
                            "rule.836640770.from_port": "22",
                            "rule.836640770.id": "8845acba-636b-4c23-b9e2-5bff76d9008d",
                            "rule.836640770.ip_protocol": "tcp",
                            "rule.836640770.self": "false",
                            "rule.836640770.to_port": "22"
                        }
                    }
                },
                "openstack_compute_secgroup_v2.k8s_master": {
                    "type": "openstack_compute_secgroup_v2",
                    "primary": {
                        "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
                        "attributes": {
                            "description": "example - Kubernetes Master",
                            "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
                            "name": "example-k8s-master",
                            "rule.#": "0"
                        }
                    }
                },
                "openstack_networking_floatingip_v2.k8s_master.0": {
                    "type": "openstack_networking_floatingip_v2",
                    "primary": {
                        "id": "2a320c67-214d-4631-a840-2de82505ed3f",
                        "attributes": {
                            "address": "173.247.105.12",
                            "id": "2a320c67-214d-4631-a840-2de82505ed3f",
                            "pool": "external",
                            "port_id": ""
                        }
                    }
                },
                "openstack_networking_floatingip_v2.k8s_master.1": {
                    "type": "openstack_networking_floatingip_v2",
                    "primary": {
                        "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
                        "attributes": {
                            "address": "173.247.105.70",
                            "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
                            "pool": "external",
                            "port_id": ""
                        }
                    }
                },
                "openstack_networking_floatingip_v2.k8s_node": {
                    "type": "openstack_networking_floatingip_v2",
                    "primary": {
                        "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
                        "attributes": {
                            "address": "173.247.105.76",
                            "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
                            "pool": "external",
                            "port_id": ""
                        }
                    }
                }
            }
        }
    ]
}
13  contrib/terraform/openstack/terraform.tfstate.backup  Normal file
@@ -0,0 +1,13 @@
{
    "version": 1,
    "serial": 16,
    "modules": [
        {
            "path": [
                "root"
            ],
            "outputs": {},
            "resources": {}
        }
    ]
}
61  contrib/terraform/openstack/variables.tf  Normal file
@@ -0,0 +1,61 @@
variable "cluster_name" {
  default = "example"
}

variable "number_of_k8s_masters" {
  default = 2
}

variable "number_of_k8s_nodes" {
  default = 1
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default     = "~/.ssh/id_rsa.pub"
}

variable "image" {
  description = "the image to use"
  default     = "ubuntu-14.04"
}

variable "ssh_user" {
  description = "used to fill out tags for ansible inventory"
  default     = "ubuntu"
}

variable "flavor_k8s_master" {
  default = 3
}

variable "flavor_k8s_node" {
  default = 3
}

variable "network_name" {
  description = "name of the internal network to use"
  default     = "internal"
}

variable "floatingip_pool" {
  description = "name of the floating ip pool to use"
  default     = "external"
}

variable "username" {
  description = "Your openstack username"
}

variable "password" {
  description = "Your openstack password"
}

variable "tenant" {
  description = "Your openstack tenant/project"
}

variable "auth_url" {
  description = "Your openstack auth URL"
}
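The variables without defaults (credentials and endpoint) are typically supplied via a `terraform.tfvars` file or `TF_VAR_*` environment variables rather than on the command line. A minimal sketch, with all values being placeholders rather than anything taken from this commit:

```
cluster_name    = "example"
username        = "your-openstack-user"
password        = "your-openstack-password"
tenant          = "your-project"
auth_url        = "https://keystone.example.com:5000/v2.0"
floatingip_pool = "external"
network_name    = "internal"
```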
736  contrib/terraform/terraform.py  Executable file
@@ -0,0 +1,736 @@
#!/usr/bin/env python
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# original: https://github.com/CiscoCloud/terraform.py

"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re

VERSION = '0.3.0pre'


def tfstates(root=None):
    root = root or os.getcwd()
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if os.path.splitext(name)[-1] == '.tfstate':
                yield os.path.join(dirpath, name)


def iterresources(filenames):
    for filename in filenames:
        with open(filename, 'r') as json_file:
            state = json.load(json_file)
            for module in state['modules']:
                name = module['path'][-1]
                for key, resource in module['resources'].items():
                    yield name, key, resource

## READ RESOURCES
PARSERS = {}


def _clean_dc(dcname):
    # Consul DCs are strictly alphanumeric with underscores and hyphens -
    # ensure that the consul_dc attribute meets these requirements.
    return re.sub('[^\w_\-]', '-', dcname)


def iterhosts(resources):
    '''yield host tuples of (name, attributes, groups)'''
    for module_name, key, resource in resources:
        resource_type, name = key.split('.', 1)
        try:
            parser = PARSERS[resource_type]
        except KeyError:
            continue

        yield parser(resource, module_name)


def parses(prefix):
    def inner(func):
        PARSERS[prefix] = func
        return func

    return inner


def calculate_mantl_vars(func):
    """calculate Mantl vars"""

    @wraps(func)
    def inner(*args, **kwargs):
        name, attrs, groups = func(*args, **kwargs)

        # attrs
        if attrs.get('role', '') == 'control':
            attrs['consul_is_server'] = True
        else:
            attrs['consul_is_server'] = False

        # groups
        if attrs.get('publicly_routable', False):
            groups.append('publicly_routable')

        return name, attrs, groups

    return inner


def _parse_prefix(source, prefix, sep='.'):
    for compkey, value in source.items():
        try:
            curprefix, rest = compkey.split(sep, 1)
        except ValueError:
            continue

        if curprefix != prefix or rest == '#':
            continue

        yield rest, value


def parse_attr_list(source, prefix, sep='.'):
    attrs = defaultdict(dict)
    for compkey, value in _parse_prefix(source, prefix, sep):
        idx, key = compkey.split(sep, 1)
        attrs[idx][key] = value

    return attrs.values()


def parse_dict(source, prefix, sep='.'):
    return dict(_parse_prefix(source, prefix, sep))


def parse_list(source, prefix, sep='.'):
    return [value for _, value in _parse_prefix(source, prefix, sep)]


def parse_bool(string_form):
    token = string_form.lower()[0]

    if token == 't':
        return True
    elif token == 'f':
        return False
    else:
        raise ValueError('could not convert %r to a bool' % string_form)
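# Illustrative note (editor's sketch, not part of the upstream script):
# Terraform 0.6-era state flattens nested attributes into dotted keys, e.g.
#   raw = {'metadata.#': '2', 'metadata.ssh_user': 'ubuntu',
#          'metadata.role': 'none', 'network.0.fixed_ip_v4': '10.230.7.86'}
# so parse_dict(raw, 'metadata') yields
#   {'ssh_user': 'ubuntu', 'role': 'none'}       # the '#' count key is skipped
# and parse_attr_list(raw, 'network') yields
#   [{'fixed_ip_v4': '10.230.7.86'}]             # one dict per list index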
@parses('triton_machine')
@calculate_mantl_vars
def triton_machine(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs.get('name')
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'dataset': raw_attrs['dataset'],
        'disk': raw_attrs['disk'],
        'firewall_enabled': parse_bool(raw_attrs['firewall_enabled']),
        'image': raw_attrs['image'],
        'ips': parse_list(raw_attrs, 'ips'),
        'memory': raw_attrs['memory'],
        'name': raw_attrs['name'],
        'networks': parse_list(raw_attrs, 'networks'),
        'package': raw_attrs['package'],
        'primary_ip': raw_attrs['primaryip'],
        'root_authorized_keys': raw_attrs['root_authorized_keys'],
        'state': raw_attrs['state'],
        'tags': parse_dict(raw_attrs, 'tags'),
        'type': raw_attrs['type'],
        'user_data': raw_attrs['user_data'],
        'user_script': raw_attrs['user_script'],

        # ansible
        'ansible_ssh_host': raw_attrs['primaryip'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',  # it's "root" on Triton by default

        # generic
        'public_ipv4': raw_attrs['primaryip'],
        'provider': 'triton',
    }

    # private IPv4
    for ip in attrs['ips']:
        if ip.startswith('10') or ip.startswith('192.168'):  # private IPs
            attrs['private_ipv4'] = ip
            break

    if 'private_ipv4' not in attrs:
        attrs['private_ipv4'] = attrs['public_ipv4']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', 'none')),
        'role': attrs['tags'].get('role', 'none'),
        'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('triton_image=' + attrs['image'])
    groups.append('triton_package=' + attrs['package'])
    groups.append('triton_state=' + attrs['state'])
    groups.append('triton_firewall_enabled=%s' % attrs['firewall_enabled'])
    groups.extend('triton_tags_%s=%s' % item
                  for item in attrs['tags'].items())
    groups.extend('triton_network=' + network
                  for network in attrs['networks'])

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('digitalocean_droplet')
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'locked': parse_bool(raw_attrs['locked']),
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'size': raw_attrs['size'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'status': raw_attrs['status'],
        # ansible
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',  # it's always "root" on DO
        # generic
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs.get('ipv4_address_private',
                                      raw_attrs['ipv4_address']),
        'provider': 'digitalocean',
    }

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('do_image=' + attrs['image'])
    groups.append('do_locked=%s' % attrs['locked'])
    groups.append('do_region=' + attrs['region'])
    groups.append('do_size=' + attrs['size'])
    groups.append('do_status=' + attrs['status'])
    groups.extend('do_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('softlayer_virtualserver')
@calculate_mantl_vars
def softlayer_host(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'ram': raw_attrs['ram'],
        'cpu': raw_attrs['cpu'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs['ipv4_address_private'],
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',
        'provider': 'softlayer',
    }

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
def openstack_host(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
        'id': raw_attrs['id'],
        'image': parse_dict(raw_attrs, 'image',
                            sep='_'),
        'key_pair': raw_attrs['key_pair'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'region': raw_attrs.get('region', ''),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        # ansible
        'ansible_ssh_port': 22,
        # workaround for an OpenStack bug where hosts have a different domain
        # after they're restarted
        'host_domain': 'novalocal',
        'use_host_domain': True,
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'provider': 'openstack',
    }

    if 'floating_ip' in raw_attrs:
        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']

    try:
        attrs.update({
            'ansible_ssh_host': raw_attrs['access_ip_v4'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})

    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('os_image=' + attrs['image']['name'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    groups.append('os_region=' + attrs['region'])

    # groups specific to Mantl
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])

    # groups specific to kubespray
    for group in attrs['metadata'].get('kubespray_groups', "").split(","):
        groups.append(group)

    return name, attrs, groups
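# Illustrative note (editor's addition, not in the upstream script): with the
# instance metadata written by kubespray.tf above, e.g.
#   kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
# the loop at the end of openstack_host() places the instance into the etcd,
# kube-master, kube-node and k8s-cluster inventory groups - the group layout
# the cluster playbook expects.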
@parses('aws_instance')
@calculate_mantl_vars
def aws_host(resource, module_name):
    name = resource['primary']['attributes']['tags.Name']
    raw_attrs = resource['primary']['attributes']

    groups = []

    attrs = {
        'ami': raw_attrs['ami'],
        'availability_zone': raw_attrs['availability_zone'],
        'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
        'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
        'ephemeral_block_device': parse_attr_list(raw_attrs,
                                                  'ephemeral_block_device'),
        'id': raw_attrs['id'],
        'key_name': raw_attrs['key_name'],
        'private': parse_dict(raw_attrs, 'private',
                              sep='_'),
        'public': parse_dict(raw_attrs, 'public',
                             sep='_'),
        'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        'subnet': parse_dict(raw_attrs, 'subnet',
                             sep='_'),
        'tags': parse_dict(raw_attrs, 'tags'),
        'tenancy': raw_attrs['tenancy'],
        'vpc_security_group_ids': parse_list(raw_attrs,
                                             'vpc_security_group_ids'),
        # ansible-specific
        'ansible_ssh_port': 22,
        'ansible_ssh_host': raw_attrs['public_ip'],
        # generic
        'public_ipv4': raw_attrs['public_ip'],
        'private_ipv4': raw_attrs['private_ip'],
        'provider': 'aws',
    }

    # attrs specific to Ansible
    if 'tags.sshUser' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['tags.sshUser']
    if 'tags.sshPrivateIp' in raw_attrs:
        attrs['ansible_ssh_host'] = raw_attrs['private_ip']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
        'role': attrs['tags'].get('role', 'none'),
        'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.extend(['aws_ami=' + attrs['ami'],
                   'aws_az=' + attrs['availability_zone'],
                   'aws_key_name=' + attrs['key_name'],
                   'aws_tenancy=' + attrs['tenancy']])
    groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
    groups.extend('aws_vpc_security_group=' + group
                  for group in attrs['vpc_security_group_ids'])
    groups.extend('aws_subnet_%s=%s' % subnet
                  for subnet in attrs['subnet'].items())

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('google_compute_instance')
@calculate_mantl_vars
def gce_host(resource, module_name):
    name = resource['primary']['id']
    raw_attrs = resource['primary']['attributes']
    groups = []

    # network interfaces
    interfaces = parse_attr_list(raw_attrs, 'network_interface')
    for interface in interfaces:
        interface['access_config'] = parse_attr_list(interface,
                                                     'access_config')
        for key in interface.keys():
            if '.' in key:
                del interface[key]

    # general attrs
    attrs = {
        'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
        'disks': parse_attr_list(raw_attrs, 'disk'),
        'machine_type': raw_attrs['machine_type'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'network_interface': interfaces,
        'self_link': raw_attrs['self_link'],
        'service_account': parse_attr_list(raw_attrs, 'service_account'),
        'tags': parse_list(raw_attrs, 'tags'),
        'zone': raw_attrs['zone'],
        # ansible
        'ansible_ssh_port': 22,
        'provider': 'gce',
    }

    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    try:
        attrs.update({
            'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
            'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
            'private_ipv4': interfaces[0]['address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})

    # add groups based on attrs
    groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
    groups.append('gce_machine_type=' + attrs['machine_type'])
    groups.extend('gce_metadata_%s=%s' % (key, value)
                  for (key, value) in attrs['metadata'].items()
                  if key not in set(['sshKeys']))
    groups.extend('gce_tag=' + tag for tag in attrs['tags'])
    groups.append('gce_zone=' + attrs['zone'])

    if attrs['can_ip_forward']:
        groups.append('gce_ip_forward')
    if attrs['publicly_routable']:
        groups.append('gce_publicly_routable')

    # groups specific to Mantl
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('vsphere_virtual_machine')
@calculate_mantl_vars
def vsphere_host(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    network_attrs = parse_dict(raw_attrs, 'network_interface')
    network = parse_dict(network_attrs, '0')
    ip_address = network.get('ipv4_address', network['ip_address'])
    name = raw_attrs['name']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'ip_address': ip_address,
        'private_ipv4': ip_address,
        'public_ipv4': ip_address,
        'metadata': parse_dict(raw_attrs, 'custom_configuration_parameters'),
        'ansible_ssh_port': 22,
        'provider': 'vsphere',
    }

    try:
        attrs.update({
            'ansible_ssh_host': ip_address,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', })

    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # attrs specific to Ansible
    if 'ssh_user' in attrs['metadata']:
        attrs['ansible_ssh_user'] = attrs['metadata']['ssh_user']

    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('azure_instance')
@calculate_mantl_vars
def azure_host(resource, module_name):
    name = resource['primary']['attributes']['name']
    raw_attrs = resource['primary']['attributes']

    groups = []

    attrs = {
        'automatic_updates': raw_attrs['automatic_updates'],
        'description': raw_attrs['description'],
        'hosted_service_name': raw_attrs['hosted_service_name'],
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ip_address': raw_attrs['ip_address'],
        'location': raw_attrs['location'],
        'name': raw_attrs['name'],
        'reverse_dns': raw_attrs['reverse_dns'],
        'security_group': raw_attrs['security_group'],
        'size': raw_attrs['size'],
        'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
        'subnet': raw_attrs['subnet'],
        'username': raw_attrs['username'],
        'vip_address': raw_attrs['vip_address'],
        'virtual_network': raw_attrs['virtual_network'],
        'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
        # ansible
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs['username'],
        'ansible_ssh_host': raw_attrs['vip_address'],
    }

    # attrs specific to mantl
    attrs.update({
        'consul_dc': attrs['location'].lower().replace(" ", "-"),
        'role': attrs['description']
    })

    # add groups based on attrs
    groups.extend(['azure_image=' + attrs['image'],
                   'azure_location=' + attrs['location'].lower().replace(" ", "-"),
                   'azure_username=' + attrs['username'],
                   'azure_security_group=' + attrs['security_group']])

    # groups specific to mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups
@parses('clc_server')
@calculate_mantl_vars
def clc_server(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs.get('id')
    groups = []
    md = parse_dict(raw_attrs, 'metadata')
    attrs = {
        'metadata': md,
        'ansible_ssh_port': md.get('ssh_port', 22),
        'ansible_ssh_user': md.get('ssh_user', 'root'),
        'provider': 'clc',
        'publicly_routable': False,
    }

    try:
        attrs.update({
            'public_ipv4': raw_attrs['public_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
            'ansible_ssh_host': raw_attrs['public_ip_address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({
            'ansible_ssh_host': raw_attrs['private_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
        })

    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })

    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
    for name, attrs, _ in hosts:
        if name == target:
            return attrs

    return {}


def query_list(hosts):
    groups = defaultdict(dict)
    meta = {}

    for name, attrs, hostgroups in hosts:
        for group in set(hostgroups):
            groups[group].setdefault('hosts', [])
            groups[group]['hosts'].append(name)

        meta[name] = attrs

    groups['_meta'] = {'hostvars': meta}
    return groups
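# Illustrative note (editor's addition): for the two-master/one-node tfstate
# committed above, `terraform.py --list` would emit Ansible dynamic-inventory
# JSON shaped roughly like:
#   {"kube-master": {"hosts": ["example-k8s-master-1", "example-k8s-master-2"]},
#    "kube-node":   {"hosts": ["example-k8s-master-1", "example-k8s-master-2",
#                              "example-k8s-node-1"]},
#    "_meta": {"hostvars": {"example-k8s-master-1": {"ansible_ssh_host": "173.247.105.12", ...}}}}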
def query_hostfile(hosts):
    out = ['## begin hosts generated by terraform.py ##']
    out.extend(
        '{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
        for name, attrs, _ in hosts
    )

    out.append('## end hosts generated by terraform.py ##')
    return '\n'.join(out)


def main():
    parser = argparse.ArgumentParser(
        __file__, __doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    modes = parser.add_mutually_exclusive_group(required=True)
    modes.add_argument('--list',
                       action='store_true',
                       help='list all variables')
    modes.add_argument('--host', help='list variables for a single host')
    modes.add_argument('--version',
                       action='store_true',
                       help='print version and exit')
    modes.add_argument('--hostfile',
                       action='store_true',
                       help='print hosts as a /etc/hosts snippet')
    parser.add_argument('--pretty',
                        action='store_true',
                        help='pretty-print output JSON')
    parser.add_argument('--nometa',
                        action='store_true',
                        help='with --list, exclude hostvars')
    default_root = os.environ.get('TERRAFORM_STATE_ROOT',
                                  os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                               '..', '..', )))
    parser.add_argument('--root',
                        default=default_root,
                        help='custom root to search for `.tfstate`s in')

    args = parser.parse_args()

    if args.version:
        print('%s %s' % (__file__, VERSION))
        parser.exit()

    hosts = iterhosts(iterresources(tfstates(args.root)))
    if args.list:
        output = query_list(hosts)
        if args.nometa:
            del output['_meta']
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.host:
        output = query_host(hosts, args.host)
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.hostfile:
        output = query_hostfile(hosts)
        print(output)

    parser.exit()


if __name__ == '__main__':
    main()
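The script doubles as an Ansible dynamic inventory, which is why the `hosts` symlink added above points at it. Typical invocations look like this (a sketch, run from the repository root):

```
# inspect the inventory Ansible would see
contrib/terraform/terraform.py --list --pretty
# deploy the cluster against the Terraform-managed hosts
ansible-playbook -i contrib/terraform/openstack/hosts cluster.yml
```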
@@ -1,5 +0,0 @@
---
- hosts: k8s-cluster
  gather_facts: False
  roles:
    - coreos-bootstrap
50  docs/ansible.md  Normal file
@@ -0,0 +1,50 @@
Ansible variables
===============


Inventory
-------------
The inventory is composed of 3 groups:

* **kube-node** : list of kubernetes nodes where the pods will run.
* **kube-master** : list of servers where the kubernetes master components (apiserver, scheduler, controller) will run.
  Note: if you want a server to act as both master and node, it must be defined in both the _kube-master_ and _kube-node_ groups.
* **etcd**: list of servers that compose the etcd cluster. You should have at least 3 servers for failover purposes.

Below is a complete inventory example:

```
## Configure 'ip' variable to bind kubernetes services on a
## different ip than the default iface
node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6

[kube-master]
node1
node2

[etcd]
node1
node2
node3

[kube-node]
node2
node3
node4
node5
node6

[k8s-cluster:children]
kube-node
kube-master
etcd
```

Group vars
--------------
The main variables to change are located in the file `inventory/group_vars/all.yml`.
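For instance, a couple of the variables commonly touched there look like this (a sketch; the names follow variables used elsewhere in this changeset, the values are illustrative, and the authoritative defaults live in `inventory/group_vars/all.yml` itself):

```
# network used for kubernetes services (also referenced by docker_options above)
kube_service_addresses: 10.233.0.0/18
# uncomment and set when running on a cloud provider (see docs/aws.md)
# cloud_provider: aws
```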
10  docs/aws.md  Normal file
@@ -0,0 +1,10 @@
AWS
===============

To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.

Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for etcd do not need a role.

The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
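A hedged example of such an inventory entry (the hostname pattern is taken from the paragraph above; the address and user are placeholders):

```
ip-111-222-333-444.us-west-2.compute.internal ansible_ssh_host=54.111.222.33 ansible_ssh_user=ubuntu
```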
You can now create your cluster!
39  docs/calico.md  Normal file
@@ -0,0 +1,39 @@
Calico
===========

Check if the calico-node container is running

```
docker ps | grep calico
```

The **calicoctl** command allows you to check the status of the network workloads.
* Check the status of Calico nodes

```
calicoctl status
```

* Show the configured network subnet for containers

```
calicoctl pool show
```

* Show the workloads (IP addresses of containers and where they are located)

```
calicoctl endpoint show --detail
```

##### Optional: BGP peering with border routers

In some cases you may want to route the pods' subnet so that NAT is not needed on the nodes,
for instance if you have a cluster spread across different locations and you want your pods to talk to each other no matter where they are located.
The following variables need to be set:
`peer_with_router` to enable peering with the datacenter's border router (default value: false).
You'll also need to edit the inventory and add a hostvar `local_as` per node.

```
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
```
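In group vars the peering switch itself might be enabled like so (a sketch; `peer_with_router` is the variable named above, default false):

```
peer_with_router: true
```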
22  docs/cloud.md  Normal file
@@ -0,0 +1,22 @@
Cloud providers
==============

#### Provisioning

You can use kargo-cli to start new instances on cloud providers; here's an example:
```
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```

#### Deploy kubernetes

With kargo-cli:
```
kargo deploy [--aws|--gce] -u admin
```

Or with the ansible-playbook command:
```
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
```
24  docs/coreos.md  Normal file
@@ -0,0 +1,24 @@
CoreOS bootstrap
===============

Example with **kargo-cli**:

```
kargo deploy --gce --coreos
```

Or with Ansible:

Before running the cluster playbook you must satisfy the following requirements:

* On each CoreOS node, a writable directory **/opt/bin** (~400M disk space)

* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`

* Run the Python bootstrap playbook:

```
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
```

Then you can proceed to [cluster deployment](#run-deployment)
92  docs/dns-stack.md  Normal file
@@ -0,0 +1,92 @@
K8s DNS stack by Kargo
======================

Kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).

Note: additional search (sub)domains may be defined in the ``searchdomains``
and ``ndots`` vars, and additional recursive DNS resolvers in the
``upstream_dns_servers`` and ``nameservers`` vars. Intranet DNS resolvers
should be specified first, followed by external resolvers, for example:

```
skip_dnsmasq: true
nameservers: [8.8.8.8]
upstream_dns_servers: [172.18.32.6]
```
or
```
skip_dnsmasq: false
upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
```
The vars are explained below as well.

DNS configuration details
-------------------------

Here is an approximate picture of how DNS works and is configured by
the Kargo ansible playbooks:

![Image](figures/dns.jpeg?raw=true)

Note that an additional dnsmasq daemon set is installed by Kargo
by default. Kubelet configures the DNS base of all pods to use the
given dnsmasq cluster IP, which is defined via the ``dns_server`` var.
dnsmasq forwards requests for the given cluster ``dns_domain`` to
Kubedns's SkyDns service. The SkyDns server is configured to be an
authoritative DNS server for the given cluster domain (and its subdomains
up to ``ndots:5`` depth). Note: you should scale up its replication
controller if SkyDns chokes. These two layered DNS forwarders provide HA for
the DNS cluster IP endpoint, which is a critical moving part for Kubernetes apps.

Nameservers are configured in the hosts' ``/etc/resolv.conf`` files as well,
as the given DNS cluster IP merged with the ``nameservers`` values, while the
DNS cluster IP merged with the ``upstream_dns_servers`` defines additional
nameservers for the aforementioned dnsmasq daemon set running on all hosts.
This mitigates the existing Linux limitation of max 3 nameservers in
``/etc/resolv.conf`` and also brings an additional caching layer for the
clustered DNS services.
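For illustration, with the first example above (``skip_dnsmasq: true``) a host's resulting ``/etc/resolv.conf`` would look roughly like this (a sketch; the exact cluster IP and domain depend on your ``skydns_server`` and ``dns_domain`` settings):

```
search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.233.0.3
nameserver 8.8.8.8
```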
You can skip the dnsmasq daemon set install steps by setting
``skip_dnsmasq: true``. This may be the case if you're fine with
the nameserver limitation. Sadly, there is no way to work around the
search domain limitations of 256 chars and 6 domains, so you can
use the ``searchdomains`` var to define no more than three custom domains;
the remaining three slots are reserved for the K8s cluster default subdomains.

When dnsmasq is skipped, Kargo redefines the DNS cluster IP to point directly
to the SkyDns cluster IP ``skydns_server`` and configures Kubelet's
``--dns_cluster`` to use that IP as well. While this greatly simplifies
things, it comes at the price of limited nameservers: as the DNS cluster IP
takes a slot in ``/etc/resolv.conf``, you can specify no more than two
nameservers for infra and/or external use.
Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
and will be merged together with the ``skydns_server`` IP into the hosts'
``/etc/resolv.conf``.

Limitations
-----------

* Kargo does not yet have a way to configure the Kubedns add-on to forward
requests SkyDns cannot answer with authority to arbitrary recursive resolvers.
This task is left for the future. See
[official SkyDns docs](https://github.com/skynetservices/skydns)
for details.

* There is
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
for the SkyDNS ``ndots`` param via an
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
add-on, even though SkyDNS supports it. Thus, DNS SRV records may not work
as expected, as they require ``ndots:7``.

* the ``searchdomains`` are limited to 6 names and 256 chars in
length. Due to the default ``svc, default.svc`` subdomains, the actual
limits are 4 names and 239 chars respectively.

* the ``nameservers`` are limited to 3 servers, although there
is a way to mitigate that with ``upstream_dns_servers``,
see above. In any case, ``nameservers`` can take no more than two
custom DNS servers because one slot is reserved for the Kubernetes
cluster's needs.
BIN  docs/figures/dns.jpeg  Normal file
Binary file not shown. (After: 654 KiB)
BIN  docs/figures/loadbalancer_localhost.png  Normal file
Binary file not shown. (After: 57 KiB)
51  docs/flannel.md  Normal file
@@ -0,0 +1,51 @@
Flannel
==============

* The flannel configuration file should have been created:

```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```

* Check if the network interface has been created

```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
    inet 10.233.16.0/18 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
       valid_lft forever preferred_lft forever
```

* Docker must be configured with a bridge IP in the flannel subnet.

```
ps aux | grep docker
root     20196  1.7  2.7 1260616 56840 ?       Ssl  10:18   0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```

* Try to run a container and check its IP address

```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created

kubectl describe po test-34ozs | grep ^IP
IP:    10.233.16.2
```

```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
    link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
    inet 10.233.16.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
       valid_lft forever preferred_lft forever
```
19  docs/getting-started.md  Normal file
@@ -0,0 +1,19 @@
Getting started
===============

The easiest way to run the deployment is to use the **kargo-cli** tool.
Complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).

Here is a simple example on AWS:

* Create instances and generate the inventory

```
kargo aws --instances 3
```

* Run the deployment

```
kargo deploy --aws -u centos -n calico
```
docs/ha-mode.md (new file)

HA endpoints for K8s
====================

The following components require highly available endpoints:
* etcd cluster,
* kube-apiserver service instances.

The former provides the
[etcd-proxy](https://coreos.com/etcd/docs/latest/proxy.html) service to access
the cluster members in an HA fashion.

The latter relies on third-party reverse proxies, such as Nginx or HAProxy, to
achieve the same goal.

Etcd
----

Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
cluster members as backends. Note that the `access_ip` is used as the backend
IP, if specified. Frontend endpoints cannot be accessed externally as they are
bound to localhost only.
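For illustration, the local frontend can be checked from any node in the `k8s-cluster` group using etcd's standard HTTP endpoints (a sketch; assumes `curl` is available on the node):

```
# the etcd-proxy frontend should answer on localhost only
curl -s http://localhost:2379/health
curl -s http://localhost:2379/v2/members
```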
The `etcd_access_endpoint` fact provides an access pattern for clients, and the
`etcd_multiaccess` group var (defaults to `false`) controls that behavior.
When enabled, it makes deployed components access the etcd cluster members
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes that the
clients do the load balancing and handle HA for connections. Note that the pod
definition of the flannel networking plugin always uses a single
`--etcd-server` endpoint!
Kube-apiserver
--------------

K8s components require a loadbalancer to access the apiservers via a reverse
proxy. Kargo includes support for an nginx-based proxy that resides on each
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient.

This option is configured by the variable `loadbalancer_apiserver_localhost`.
If you disable it, you will need to configure your own loadbalancer to achieve
HA. Note that deploying a loadbalancer is up to the user and is not covered by
the Ansible roles in Kargo. By default, it only configures a non-HA endpoint,
which points to the `access_ip` or IP address of the first server node in the
`kube-master` group. It can also configure clients to use endpoints for a given
loadbalancer type. The following diagram shows how traffic to the apiserver is
directed.
![Image](figures/loadbalancer_localhost.png?raw=true)

Note: Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes versions below 1.5.0 concerning TLS auth on the
master role services.

A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only on localhost, similarly to the etcd-proxy HA endpoints.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
an example configuration for a HAProxy service acting as an external LB:

```
listen kubernetes-apiserver-https
  bind <VIP>:8383
  option ssl-hello-chk
  mode tcp
  timeout client 3h
  timeout server 3h
  server master1 <IP1>:443
  server master2 <IP2>:443
  balance roundrobin
```

And the corresponding example global vars config:

```
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: <VIP>
  port: 8383
```

This domain name, or the default "lb-apiserver.kubernetes.local", will be
inserted into the `/etc/hosts` file of all servers in the `k8s-cluster` group.
Note that the HAProxy service itself should also be HA and requires VIP
management, which is out of scope of this doc.
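As a quick sanity check after deployment (a sketch; the name below is the default domain from the example above), every server in the `k8s-cluster` group should resolve the LB name to the VIP:

```
grep lb-apiserver.kubernetes.local /etc/hosts
```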
Specifying an external LB overrides any internal localhost LB configuration.
Note that for this example, the `kubernetes-apiserver-http` endpoint
has backends receiving unencrypted traffic, which may be a security issue
when interconnecting different nodes; it may be acceptable if those nodes
belong to an isolated management network without external access.

In order to achieve HA for the HAProxy instances, those must be running on
each node in the `k8s-cluster` group as well, but they require no VIP and thus
no VIP management.

Access endpoints are evaluated automatically, as follows:

| Endpoint type                | kube-master   | non-master          |
|------------------------------|---------------|---------------------|
| Local LB                     | http://lc:p   | http://lc:sp        |
| External LB, no internal     | http://lc:p   | https://lb:lp       |
| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |

Where:
* `m[0]` - the first node in the `kube-master` group;
* `lb` - the LB FQDN, `apiserver_loadbalancer_domain_name`;
* `lc` - localhost;
* `p` - the insecure port, `kube_apiserver_insecure_port`;
* `sp` - the secure port, `kube_apiserver_port`;
* `lp` - the LB port, `loadbalancer_apiserver.port`, defaults to the secure port;
* `ip` - the node IP, defaults to the Ansible IP;
* `aip` - `access_ip`, defaults to the node IP.
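For example, under the default setup (no external or internal LB), a client on a non-master node reaches the apiserver as `https://m[0].aip:sp`. A minimal sketch, with a placeholder for the first master's `access_ip`:

```
# replace <first-master-access-ip> with m[0]'s access_ip
curl -k https://<first-master-access-ip>:443/healthz
```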
docs/large-deployments.md (new file)

Large deployments of K8s
========================

For large scale deployments, consider the following configuration changes:

* Tune the [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
  for the `forks` and `timeout` vars to fit the large number of nodes being deployed.

* Override the containers' `foo_image_repo` vars to point to an intranet registry.

* Set ``download_run_once: true`` to download binaries and container
  images only once, then push them to nodes in batches.

* Adjust the `retry_stagger` global var as appropriate. It should provide a sane
  load on the delegate (the first K8s master node) when retrying failed
  push or download operations.

For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define ``retry_stagger: 60``. A sketch of
such a run follows.
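Put together, the invocation might look like this (the inventory path is an assumption; the flag values are the illustrative ones from above):

```
# ~200 nodes: more forks, a longer connection timeout, staggered retries
ansible-playbook -i inventory/inventory.cfg -b \
  --forks=50 --timeout=600 \
  -e retry_stagger=60 \
  cluster.yml
```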
docs/openstack.md (new file)

OpenStack
===============

To deploy kubespray on [OpenStack](https://www.openstack.org/), uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.

After that, make sure to source in your OpenStack credentials like you would do when using `nova-client`, by using `source path/to/your/openstack-rc`.

The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.
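A quick way to compare the two (a sketch; `k8s-1` is the instance name used in the example further below):

```
# the configured hostname must match the Nova instance name exactly
hostname
nova show k8s-1 | grep -w name
```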
Unless you are using Calico, you can now run the playbook.

**Additional step needed when using Calico:**

Calico does not encapsulate all packets with the hosts' IP addresses. Instead, the packets are routed with the pods' IP addresses directly.
OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.

In order to make Calico work on OpenStack, you will need to tell OpenStack to allow Calico's packets by allowing the network it uses.

First you will need the IDs of your OpenStack instances that will run kubernetes:

```
nova list --tenant Your-Tenant
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID                                   | Name   | Tenant ID                        | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
```

Then you can use the instance IDs to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports:

```
neutron port-list -c id -c device_id
+--------------------------------------+--------------------------------------+
| id                                   | device_id                            |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```

Given the port IDs on the left, you can set the `allowed_address_pairs` in neutron:

```
# allow kube_service_addresses network
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18

# allow kube_pods_subnet network
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
```
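To confirm that the pairs were applied, you can inspect a port (a sketch, using the first port ID from above):

```
neutron port-show 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | grep allowed_address_pairs
```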
Now you can finally run the playbook.
docs/roadmap.md (new file)

Kargo's roadmap
=================

### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
- to be discussed: a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)

### Provisioning and cloud providers
- Terraform to provision instances on **GCE, AWS, OpenStack, Digital Ocean, Azure**
- On AWS: autoscaling, multi AZ
- On Azure: autoscaling, create a loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
- On GCE: be able to create a loadbalancer automatically (IAM?) [#280](https://github.com/kubespray/kargo/issues/280)
- **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
  (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
  https://github.com/kubernetes/kubernetes/issues/18112)

### Tests
- Run kubernetes e2e tests
- migrate to Jenkins
  (a test is currently a deployment on a 3 node cluster, testing the k8s API and pinging between 2 pods)
- Full tests on GCE per day (all OS's, all network plugins)
- trigger a single test per pull request
- single test with the Ansible version n-1 per day
- Test idempotency on a single OS but for all network plugins/container engines
- single test on AWS per day
- test different architectures:
  - 3 instances: 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
  - 5 instances: 3 are etcd and nodes, 2 are masters only
  - 7 instances: 3 etcd only, 2 masters, 2 nodes
- test scaling up the cluster: +1 etcd, +1 master, +1 node

### Lifecycle
- Drain worker node when upgrading k8s components on a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- Drain worker node when shutting down/deleting an instance

### Networking
- romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
- Opencontrail
- Canal

### High availability
- (to be discussed) option to set up a loadbalancer for the apiservers like ucarp/pacemaker/keepalived,
  while waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.

### Kargo-cli
- Delete instances
- `kargo vagrant` to set up a test cluster locally
- `kargo azure` for Microsoft Azure support
- switch to Terraform instead of Ansible for provisioning
- update $HOME/.kube/config when a cluster is deployed; optionally switch to this context

### Kargo API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- make sure that the state of a cluster is completely saved in no more than one config file beyond the hosts inventory

### Addons (with kpm)
Include optional deployments to init the cluster:

##### Monitoring
- Heapster / Grafana ....
- **Prometheus**

##### Others

##### Dashboards:
- kubernetes-dashboard
- Fabric8
- Tectonic
- Cockpit

##### Paas like
- Openshift Origin
- Openstack
- Deis Workflow

### Others
- remove nodes (adding is already supported)
- being able to choose any k8s version (almost done)
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
- Review documentation (split into categories)
- **consul** -> if officially supported by k8s
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
docs/vagrant.md (new file)

Vagrant Install
=================

Assuming you have Vagrant (1.8+) installed with VirtualBox (it may work
with VMware, but this is untested), you should be able to launch a 3-node
Kubernetes cluster by simply running `$ vagrant up`.

This will spin up 3 VMs and install kubernetes on them. Once they are
up you can connect to any of them by running
`$ vagrant ssh k8s-0[1..3]`.

```
$ vagrant up
Bringing machine 'k8s-01' up with 'virtualbox' provider...
Bringing machine 'k8s-02' up with 'virtualbox' provider...
Bringing machine 'k8s-03' up with 'virtualbox' provider...
==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
...
...
    k8s-03: Running ansible-playbook...

PLAY [k8s-cluster] *************************************************************

TASK [setup] *******************************************************************
ok: [k8s-03]
ok: [k8s-01]
ok: [k8s-02]
...
...
PLAY RECAP *********************************************************************
k8s-01                     : ok=157  changed=66   unreachable=0    failed=0
k8s-02                     : ok=137  changed=59   unreachable=0    failed=0
k8s-03                     : ok=86   changed=51   unreachable=0    failed=0

$ vagrant ssh k8s-01
vagrant@k8s-01:~$ kubectl get nodes
NAME      STATUS    AGE
k8s-01    Ready     45s
k8s-02    Ready     45s
k8s-03    Ready     45s
```
```
@@ -1,9 +1,14 @@
+# Valid bootstrap options (required): xenial, coreos, none
+bootstrap_os: none
+
 # Directory where the binaries will be installed
 bin_dir: /usr/local/bin

 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
 local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5

 # Uncomment this line for CoreOS only.
 # Directory where python binary is installed
@@ -17,16 +22,19 @@ kube_cert_group: kube-cert
 kube_log_level: 2

 # Users to create for basic auth in Kubernetes API via HTTP
+kube_api_pwd: "changeme"
 kube_users:
   kube:
-    pass: changeme
+    pass: "{{kube_api_pwd}}"
+    role: admin
+  root:
+    pass: "changeme"
     role: admin
-# root:
-#   pass: changeme
-#   role: admin

 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf
+ndots: 5

 # For some environments, each node has a pubilcally accessible
 # address and an address it should bind services to. These are
@@ -50,6 +58,16 @@ cluster_name: cluster.local
 # but don't know about that address themselves.
 # access_ip: 1.1.1.1

+# Etcd access modes:
+# Enable multiaccess to configure clients to access all of the etcd members directly
+# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+# This may be the case if clients support and loadbalance multiple etcd servers natively.
+etcd_multiaccess: false
+
+# Assume there are no internal loadbalancers for apiservers exist and listen on
+# kube_apiserver_port (default 443)
+loadbalancer_apiserver_localhost: true
+
 # Choose network plugin (calico, weave or flannel)
 kube_network_plugin: flannel

@@ -88,10 +106,12 @@ kube_apiserver_insecure_port: 8080 # (http)
 # You still must manually configure all your containers to use this DNS server,
 # Kubernetes won't do this for you (yet).

+# Do not install additional dnsmasq
+skip_dnsmasq: false
 # Upstream dns servers used by dnsmasq
-upstream_dns_servers:
-  - 8.8.8.8
-  - 4.4.8.8
+#upstream_dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
 #
 # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
 dns_setup: true
@@ -101,17 +121,24 @@ dns_domain: "{{ cluster_name }}"
 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
 dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

-# For multi masters architecture:
-# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
-# This domain name will be inserted into the /etc/hosts file of all servers
-# configuration example with haproxy :
-# listen kubernetes-apiserver-https
-#   bind 10.99.0.21:8383
-#   option ssl-hello-chk
-#   mode tcp
-#   timeout client 3h
-#   timeout server 3h
-#   server master1 10.99.0.26:443
-#   server master2 10.99.0.27:443
-#   balance roundrobin
-# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
+# There are some changes specific to the cloud providers
+# for instance we need to encapsulate packets with some network plugins
+# If set the possible values are either 'gce', 'aws' or 'openstack'
+# When openstack is used make sure to source in the openstack credentials
+# like you would do when using nova-client before starting the playbook.
+# cloud_provider:
+
+## Set these proxy values in order to update docker daemon to use proxies
+# http_proxy: ""
+# https_proxy: ""
+# no_proxy: ""
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+docker_options: "--insecure-registry={{ kube_service_addresses }}"
+
+# default packages to install within the cluster
+kpm_packages: []
+# - name: kube-system/grafana
```
```
@@ -1,29 +1,29 @@
-[kube-master]
-node1 ansible_ssh_host=10.99.0.26
-node2 ansible_ssh_host=10.99.0.27
-
-[etcd]
-node1 ansible_ssh_host=10.99.0.26
-node2 ansible_ssh_host=10.99.0.27
-node3 ansible_ssh_host=10.99.0.4
-
-[kube-node]
-node2 ansible_ssh_host=10.99.0.27
-node3 ansible_ssh_host=10.99.0.4
-node4 ansible_ssh_host=10.99.0.5
-node5 ansible_ssh_host=10.99.0.36
-node6 ansible_ssh_host=10.99.0.37
-
-[paris]
-node1 ansible_ssh_host=10.99.0.26
-node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
-node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
-
-[new-york]
-node2 ansible_ssh_host=10.99.0.27
-node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
-node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
-
-[k8s-cluster:children]
-kube-node
-kube-master
+#[kube-master]
+#node1 ansible_ssh_host=10.99.0.26
+#node2 ansible_ssh_host=10.99.0.27
+#
+#[etcd]
+#node1 ansible_ssh_host=10.99.0.26
+#node2 ansible_ssh_host=10.99.0.27
+#node3 ansible_ssh_host=10.99.0.4
+#
+#[kube-node]
+#node2 ansible_ssh_host=10.99.0.27
+#node3 ansible_ssh_host=10.99.0.4
+#node4 ansible_ssh_host=10.99.0.5
+#node5 ansible_ssh_host=10.99.0.36
+#node6 ansible_ssh_host=10.99.0.37
+#
+#[paris]
+#node1 ansible_ssh_host=10.99.0.26
+#node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
+#node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
+#
+#[new-york]
+#node2 ansible_ssh_host=10.99.0.27
+#node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
+#node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
+#
+#[k8s-cluster:children]
+#kube-node
+#kube-master
```
requirements.txt (new file)

```
ansible
netaddr
```
```
@@ -3,50 +3,46 @@
   path: roles/apps
   scm: git

-- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-ui.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-fabric8.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
-  path: roles/apps
-  scm: git
-
-- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
-  path: roles/apps
-  scm: git
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-dashboard.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
+#  path: roles/apps
+#  scm: git
+#
+#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
+#  path: roles/apps
+#  scm: git
```
roles/adduser/defaults/main.yml (new file)

```
---
addusers:
  etcd:
    name: etcd
    comment: "Etcd user"
    createhome: yes
    home: "/var/lib/etcd"
    system: yes
    shell: /bin/nologin
  kube:
    name: kube
    comment: "Kubernetes user"
    shell: /sbin/nologin
    system: yes
    group: "{{ kube_cert_group }}"
    createhome: no

adduser:
  name: "{{ user.name }}"
  group: "{{ user.name|default(None) }}"
  comment: "{{ user.comment|default(None) }}"
  shell: "{{ user.shell|default(None) }}"
  system: "{{ user.system|default(None) }}"
  createhome: "{{ user.createhome|default(None) }}"
```
```
@@ -1,28 +1,13 @@
 ---
-- name: gather os specific variables
-  include_vars: "{{ item }}"
-  with_first_found:
-    - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
-        - defaults.yml
-      paths:
-        - ../vars
-      skip: true
-
 - name: User | Create User Group
-  group: name={{item.group|default(item.name)}} system={{item.system|default(omit)}}
-  with_items: addusers
+  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}

 - name: User | Create User
   user:
-    comment: "{{item.comment|default(omit)}}"
-    createhome: "{{item.create_home|default(omit)}}"
-    group: "{{item.group|default(item.name)}}"
-    home: "{{item.home|default(omit)}}"
-    name: "{{item.name}}"
-    system: "{{item.system|default(omit)}}"
-  with_items: addusers
+    comment: "{{user.comment|default(omit)}}"
+    createhome: "{{user.create_home|default(omit)}}"
+    group: "{{user.group|default(user.name)}}"
+    home: "{{user.home|default(omit)}}"
+    shell: "{{user.shell|default(omit)}}"
+    name: "{{user.name}}"
+    system: "{{user.system|default(omit)}}"
```
```
@@ -3,13 +3,15 @@ set -e

 BINDIR="/opt/bin"

+mkdir -p $BINDIR
+
 cd $BINDIR

 if [[ -e $BINDIR/.bootstrapped ]]; then
   exit 0
 fi

-PYPY_VERSION=2.4.0
+PYPY_VERSION=5.1.0

 wget -O - https://bitbucket.org/pypy/pypy/downloads/pypy-$PYPY_VERSION-linux64.tar.bz2 |tar -xjf -
 mv -n pypy-$PYPY_VERSION-linux64 pypy
```
```
@@ -4,9 +4,10 @@
   register: need_bootstrap
   ignore_errors: True

+
 - name: Bootstrap | Run bootstrap.sh
   script: bootstrap.sh
-  when: need_bootstrap | failed
+  when: (need_bootstrap | failed)

 - set_fact:
     ansible_python_interpreter: "/opt/bin/python"
@@ -16,25 +17,33 @@
   register: need_pip
   ignore_errors: True
   changed_when: false
-  when: need_bootstrap | failed
+  when: (need_bootstrap | failed)

 - name: Bootstrap | Copy get-pip.py
   copy: src=get-pip.py dest=~/get-pip.py
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Install pip
   shell: "{{ansible_python_interpreter}} ~/get-pip.py"
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Remove get-pip.py
   file: path=~/get-pip.py state=absent
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Bootstrap | Install pip launcher
   copy: src=runner dest=/opt/bin/pip mode=0755
-  when: need_pip | failed
+  when: (need_pip | failed)

 - name: Install required python modules
   pip:
     name: "{{ item }}"
-  with_items: pip_python_modules
+  with_items: "{{pip_python_modules}}"
+
+- name: Check configured hostname
+  shell: hostname
+  register: configured_hostname
+
+- name: Assign inventory name to unconfigured hostnames
+  shell: sh -c "echo \"{{inventory_hostname}}\" > /etc/hostname; hostname \"{{inventory_hostname}}\""
+  when: (configured_hostname.stdout == 'localhost')
```
roles/bootstrap-os/tasks/bootstrap-ubuntu.yml (new file)

```
---
# raw: cat /etc/issue.net | grep '{{ bootstrap_versions }}'

- name: Bootstrap | Check if bootstrap is needed
  raw: which python
  register: need_bootstrap
  ignore_errors: True

- name: Bootstrap | Install python 2.x
  raw: DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
  when: need_bootstrap | failed

- set_fact:
    ansible_python_interpreter: "/usr/bin/python"
```
roles/bootstrap-os/tasks/main.yml (new file)

```
---
- include: bootstrap-ubuntu.yml
  when: bootstrap_os == "ubuntu"

- include: bootstrap-coreos.yml
  when: bootstrap_os == "coreos"
```
roles/dnsmasq/defaults/main.yml (new file)

```
---
# Existing search/nameserver resolvconf entries will be purged and
# ensured by this additional data:

# Max of 4 names is allowed and no more than 256 - 17 chars total
# (a 2 is reserved for the 'default.svc.' and 'svc.')
#searchdomains:
# - foo.bar.lc

# Max of 2 is allowed here (a 1 is reserved for the dns_server)
#nameservers:
# - 127.0.0.1
```
roles/dnsmasq/handlers/main.yml (new file)

```
- name: Dnsmasq | restart network
  command: /bin/true
  notify:
    - Dnsmasq | reload network
    - Dnsmasq | update resolvconf
  when: ansible_os_family != "CoreOS"

- name: Dnsmasq | reload network
  service:
    name: >-
      {% if ansible_os_family == "RedHat" -%}
      network
      {%- elif ansible_os_family == "Debian" -%}
      networking
      {%- endif %}
    state: restarted
  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"

- name: Dnsmasq | update resolvconf
  command: /bin/true
  notify:
    - Dnsmasq | reload resolvconf
    - Dnsmasq | reload kubelet

- name: Dnsmasq | reload resolvconf
  command: /sbin/resolvconf -u
  ignore_errors: true

- name: Dnsmasq | reload kubelet
  service:
    name: kubelet
    state: restarted
  when: "{{ inventory_hostname in groups['kube-master'] }}"
  ignore_errors: true
```
```
@@ -44,12 +44,6 @@ options:
     default: null
     description:
       - The url for the API server that commands are executed against.
-  api_version:
-    required: false
-    choices: ['v1', 'v1beta3']
-    default: v1
-    description:
-      - The API version associated with cluster.
   force:
     required: false
     default: false
@@ -105,10 +99,6 @@ class KubeManager(object):
         if self.kubectl is None:
             self.kubectl = module.get_bin_path('kubectl', True)
         self.base_cmd = [self.kubectl]
-        self.api_version = module.params.get('api_version')
-
-        if self.api_version:
-            self.base_cmd.append('--api-version=' + self.api_version)

         if module.params.get('server'):
             self.base_cmd.append('--server=' + module.params.get('server'))
@@ -164,8 +154,6 @@ class KubeManager(object):
             return []

         cmd = ['replace']
-        if self.api_version != 'v1':
-            cmd = ['update']

         if self.force:
             cmd.append('--force')
@@ -271,7 +259,6 @@ def main():
             label=dict(),
             server=dict(),
             kubectl=dict(),
-            api_version=dict(default='v1', choices=['v1', 'v1beta3']),
             force=dict(default=False, type='bool'),
             all=dict(default=False, type='bool'),
             log_level=dict(default=0, type='int'),
```
roles/dnsmasq/tasks/dnsmasq.yml (new file)

```
---
- name: ensure dnsmasq.d directory exists
  file:
    path: /etc/dnsmasq.d
    state: directory

- name: ensure dnsmasq.d-available directory exists
  file:
    path: /etc/dnsmasq.d-available
    state: directory

- name: Write dnsmasq configuration
  template:
    src: 01-kube-dns.conf.j2
    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
    mode: 0755
    backup: yes

- name: Stat dnsmasq configuration
  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
  register: sym

- name: Move previous configuration
  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
  changed_when: False
  when: sym.stat.islnk is defined and sym.stat.islnk == False

- name: Enable dnsmasq configuration
  file:
    src: /etc/dnsmasq.d-available/01-kube-dns.conf
    dest: /etc/dnsmasq.d/01-kube-dns.conf
    state: link

- name: Create dnsmasq manifests
  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
  with_items:
    - {file: dnsmasq-ds.yml, type: ds}
    - {file: dnsmasq-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Start Resources
  kube:
    name: dnsmasq
    namespace: kube-system
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: /etc/kubernetes/{{item.item.file}}
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]

- name: Check for dnsmasq port (pulling image and running container)
  wait_for:
    host: "{{dns_server}}"
    port: 53
    delay: 5
  when: inventory_hostname == groups['kube-node'][0]
```
```
@@ -1,114 +1,5 @@
 ---
-- name: ensure dnsmasq.d directory exists
-  file:
-    path: /etc/dnsmasq.d
-    state: directory
-
-- name: ensure dnsmasq.d-available directory exists
-  file:
-    path: /etc/dnsmasq.d-available
-    state: directory
-
-- name: Write dnsmasq configuration
-  template:
-    src: 01-kube-dns.conf.j2
-    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
-    mode: 0755
-    backup: yes
-
-- name: Stat dnsmasq configuration
-  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
-  register: sym
-
-- name: Move previous configuration
-  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
-  changed_when: False
-  when: sym.stat.islnk is defined and sym.stat.islnk == False
-
-- name: Enable dnsmasq configuration
-  file:
-    src: /etc/dnsmasq.d-available/01-kube-dns.conf
-    dest: /etc/dnsmasq.d/01-kube-dns.conf
-    state: link
-
-- name: Create dnsmasq manifests
-  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
-  with_items:
-    - {file: dnsmasq-ds.yml, type: ds}
-    - {file: dnsmasq-svc.yml, type: svc}
-  register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Start Resources
-  kube:
-    name: dnsmasq
-    namespace: kube-system
-    kubectl: /usr/local/bin/kubectl
-    resource: "{{item.item.type}}"
-    filename: /etc/kubernetes/{{item.item.file}}
-    state: "{{item.changed | ternary('latest','present') }}"
-  with_items: manifests.results
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Check for dnsmasq port (pulling image and running container)
-  wait_for:
-    host: "{{dns_server}}"
-    port: 53
-    delay: 5
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: check resolvconf
-  stat: path=/etc/resolvconf/resolv.conf.d/head
-  register: resolvconf
-
-- name: target resolv.conf file
-  set_fact:
-    resolvconffile: >-
-      {%- if resolvconf.stat.exists == True -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}
-
-- name: Add search resolv.conf
-  lineinfile:
-    line: "search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}"
-    dest: "{{resolvconffile}}"
-    state: present
-    insertbefore: BOF
-    backup: yes
-    follow: yes
-
-- name: Add local dnsmasq to resolv.conf
-  lineinfile:
-    line: "nameserver {{dns_server}}"
-    dest: "{{resolvconffile}}"
-    state: present
-    insertafter: "^search.*$"
-    backup: yes
-    follow: yes
-
-- name: Add options to resolv.conf
-  lineinfile:
-    line: options {{ item }}
-    dest: "{{resolvconffile}}"
-    state: present
-    regexp: "^options.*{{ item }}$"
-    insertafter: EOF
-    backup: yes
-    follow: yes
-  with_items:
-    - timeout:2
-    - attempts:2
-
-- name: disable resolv.conf modification by dhclient
-  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=0755 backup=yes
-  when: ansible_os_family == "Debian"
-
-- name: disable resolv.conf modification by dhclient
-  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
-  when: ansible_os_family == "RedHat"
-
-- name: update resolvconf
-  command: resolvconf -u
-  changed_when: False
-  when: resolvconf.stat.exists == True
-
-- meta: flush_handlers
+- include: dnsmasq.yml
+  when: "{{ not skip_dnsmasq|bool }}"
+
+- include: resolvconf.yml
```
roles/dnsmasq/tasks/resolvconf.yml (new file)

```
---
- name: check resolvconf
  shell: which resolvconf
  register: resolvconf
  ignore_errors: yes

- name: target resolv.conf file
  set_fact:
    resolvconffile: >-
      {%- if resolvconf.rc == 0 -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}

- name: generate search domains to resolvconf
  set_fact:
    searchentries:
      "{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}"

- name: pick dnsmasq cluster IP
  set_fact:
    dnsmasq_server: >-
      {%- if skip_dnsmasq|bool -%}{{ [ skydns_server ] + upstream_dns_servers|default([]) }}{%- else -%}{{ [ dns_server ] }}{%- endif -%}

- name: generate nameservers to resolvconf
  set_fact:
    nameserverentries:
      "{{ dnsmasq_server|default([]) + nameservers|default([]) }}"

- name: Remove search and nameserver options from resolvconf head
  lineinfile:
    dest: /etc/resolvconf/resolv.conf.d/head
    state: absent
    regexp: "^{{ item }}.*$"
    backup: yes
    follow: yes
  with_items:
    - search
    - nameserver
  when: resolvconf.rc == 0
  notify: Dnsmasq | update resolvconf

- name: Add search domains to resolv.conf
  lineinfile:
    line: "search {{searchentries}}"
    dest: "{{resolvconffile}}"
    state: present
    insertbefore: BOF
    backup: yes
    follow: yes
  notify: Dnsmasq | update resolvconf

- name: Add nameservers to resolv.conf
  blockinfile:
    dest: "{{resolvconffile}}"
    block: |-
      {% for item in nameserverentries -%}
      nameserver {{ item }}
      {% endfor %}
    state: present
    insertafter: "^search.*$"
    create: yes
    backup: yes
    follow: yes
    marker: "# Ansible nameservers {mark}"
  notify: Dnsmasq | update resolvconf

- name: Add options to resolv.conf
  lineinfile:
    line: options {{ item }}
    dest: "{{resolvconffile}}"
    state: present
    regexp: "^options.*{{ item }}$"
    insertafter: EOF
    backup: yes
    follow: yes
  with_items:
    - ndots:{{ ndots }}
    - timeout:2
    - attempts:2
  notify: Dnsmasq | update resolvconf

- name: Remove search and nameserver options from resolvconf base
  lineinfile:
    dest: /etc/resolvconf/resolv.conf.d/base
    state: absent
    regexp: "^{{ item }}.*$"
    backup: yes
    follow: yes
  with_items:
    - search
    - nameserver
  when: resolvconf.rc == 0
  notify: Dnsmasq | update resolvconf

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/znodnsupdate mode=0755
  notify: Dnsmasq | restart network
  when: ansible_os_family == "Debian"

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x
  notify: Dnsmasq | restart network
  when: ansible_os_family == "RedHat"
```
```
@@ -4,17 +4,26 @@ listen-address=0.0.0.0

 addn-hosts=/etc/hosts

-bogus-priv
+strict-order
+# Forward k8s domain to kube-dns
+server=/{{ dns_domain }}/{{ skydns_server }}

 #Set upstream dns servers
 {% if upstream_dns_servers is defined %}
 {% for srv in upstream_dns_servers %}
 server={{ srv }}
 {% endfor %}
+{% elif cloud_provider == "gce" %}
+server=169.254.169.254
 {% else %}
 server=8.8.8.8
 server=8.8.4.4
 {% endif %}

-# Forward k8s domain to kube-dns
-server=/{{ dns_domain }}/{{ skydns_server }}
+bogus-priv
+no-resolv
+no-negcache
+cache-size=1000
+max-cache-ttl=10
+max-ttl=20
+log-facility=-
```
```
@@ -25,7 +25,7 @@ spec:
         capabilities:
           add:
             - NET_ADMIN
-        imagePullPolicy: Always
+        imagePullPolicy: IfNotPresent
         resources:
           limits:
             cpu: 100m
@@ -50,3 +50,4 @@ spec:
       - name: etcdnsmasqdavailable
         hostPath:
           path: /etc/dnsmasq.d-available
+      dnsPolicy: Default  # Don't use cluster DNS.
```
```
@@ -1 +1,10 @@
 docker_version: 1.10
+
+docker_package_info:
+  pkgs:
+
+docker_repo_key_info:
+  repo_keys:
+
+docker_repo_info:
+  repos:
```
roles/docker/files/rh_docker.repo (new file)

```
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
```
roles/docker/handlers/main.yml (new file)

```
---
- name: restart docker
  command: /bin/true
  notify:
    - Docker | reload systemd
    - Docker | reload docker
    - Docker | pause while Docker restarts
    - Docker | wait for docker

- name: Docker | reload systemd
  shell: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: Docker | reload docker
  service:
    name: docker
    state: restarted

- name: Docker | pause while Docker restarts
  pause: seconds=10 prompt="Waiting for docker restart"

- name: Docker | wait for docker
  command: /usr/bin/docker images
  register: docker_ready
  retries: 10
  delay: 5
  until: docker_ready.rc == 0
```
```
@@ -19,8 +19,7 @@
     docker requires a minimum kernel version of
     {{ docker_kernel_min_version }} on
     {{ ansible_distribution }}-{{ ansible_distribution_version }}
-  when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
+  when: (ansible_os_family != "CoreOS") and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))

 - name: ensure docker repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
@@ -28,32 +27,53 @@
     id: "{{item}}"
     keyserver: "{{docker_repo_key_info.keyserver}}"
     state: present
-  with_items: docker_repo_key_info.repo_keys
+  register: keyserver_task_result
+  until: keyserver_task_result|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  with_items: "{{ docker_repo_key_info.repo_keys }}"
+  when: ansible_os_family != "CoreOS"

 - name: ensure docker repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
     repo: "{{item}}"
     state: present
-  with_items: docker_repo_info.repos
-  when: docker_repo_info.repos|length > 0
+  with_items: "{{ docker_repo_info.repos }}"
+  when: (ansible_os_family != "CoreOS") and (docker_repo_info.repos|length > 0)

+- name: Configure docker repository on RedHat/CentOS
+  copy:
+    src: "rh_docker.repo"
+    dest: "/etc/yum.repos.d/docker.repo"
+  when: ansible_distribution in ["CentOS","RedHat"] and
+        ansible_distribution_major_version >= 7
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
-    pkg: "{{item}}"
+    pkg: "{{item.name}}"
+    force: "{{item.force|default(omit)}}"
     state: present
-  with_items: docker_package_info.pkgs
-  when: docker_package_info.pkgs|length > 0
+  register: docker_task_result
+  until: docker_task_result|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  with_items: "{{ docker_package_info.pkgs }}"
+  when: (ansible_os_family != "CoreOS") and (docker_package_info.pkgs|length > 0)

-- name: Centos needs xfs storage type for devicemapper if used
-  lineinfile:
-    dest: /etc/sysconfig/docker-storage
-    line: "DOCKER_STORAGE_OPTIONS='--storage-opt dm.fs=xfs'"
-    regexp: '^DOCKER_STORAGE_OPTIONS=.*$'
-    state: present
-    backup: yes
-  when: ansible_os_family == "RedHat"
+- name: allow for proxies on systems using systemd
+  include: systemd-proxies.yml
+  when: ansible_service_mgr == "systemd" and
+        (http_proxy is defined or https_proxy is defined or no_proxy is defined)
+
+- name: Write docker.service systemd file
+  template:
+    src: systemd-docker.service.j2
+    dest: /etc/systemd/system/docker.service
+  register: docker_service_file
+  notify: restart docker
+  when: ansible_service_mgr == "systemd" and ansible_os_family != "CoreOS"

 - meta: flush_handlers
```
roles/docker/tasks/systemd-proxies.yml (new file)

```
---
- name: create docker service directory for systemd
  file: path=/etc/systemd/system/docker.service.d state=directory

- name: drop docker environment conf to enable proxy usage
  template:
    src: http-proxy.conf.j2
    dest: /etc/systemd/system/docker.service.d/http-proxy.conf
  notify: restart docker
```
roles/docker/templates/http-proxy.conf.j2 (new file)

```
[Service]

Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
```
```
@@ -2,16 +2,17 @@
 Description=Docker Application Container Engine
 Documentation=http://docs.docker.com
 {% if ansible_os_family == "RedHat" %}
-After=network.target
+After=network.target docker-storage-setup.service
 Wants=docker-storage-setup.service
 {% elif ansible_os_family == "Debian" %}
 After=network.target docker.socket
-Requires=docker.socket
+Wants=docker.socket
 {% endif %}

 [Service]
 Type=notify
 {% if ansible_os_family == "RedHat" %}
+EnvironmentFile=-/etc/default/docker
 EnvironmentFile=-/etc/sysconfig/docker
 EnvironmentFile=-/etc/sysconfig/docker-network
 EnvironmentFile=-/etc/sysconfig/docker-storage
@@ -19,16 +20,21 @@ EnvironmentFile=-/etc/sysconfig/docker-storage
 EnvironmentFile=-/etc/default/docker
 {% endif %}
 Environment=GOTRACEBACK=crash
+ExecReload=/bin/kill -s HUP $MAINPID
+Delegate=yes
+KillMode=process
 ExecStart=/usr/bin/docker daemon \
           $OPTIONS \
           $DOCKER_STORAGE_OPTIONS \
           $DOCKER_NETWORK_OPTIONS \
-          $INSECURE_REGISTRY
+          $INSECURE_REGISTRY \
+          $DOCKER_OPTS
+TasksMax=infinity
 LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitCORE=infinity
-MountFlags=slave
 TimeoutStartSec=1min
+Restart=on-abnormal

 [Install]
 WantedBy=multi-user.target
```
@@ -5,7 +5,7 @@ docker_kernel_min_version: '2.6.32-431'
 docker_package_info:
   pkg_mgr: yum
   pkgs:
-    - docker-io
+    - name: docker-io
 
 docker_repo_key_info:
   pkg_key: ''
@@ -5,11 +5,14 @@ docker_versioned_pkg:
   latest: docker-engine
   1.9: docker-engine=1.9.1-0~{{ ansible_distribution_release|lower }}
   1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
+  1.11: docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
+  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}
 
 docker_package_info:
   pkg_mgr: apt
   pkgs:
-    - "{{ docker_versioned_pkg[docker_version] }}"
+    - name: "{{ docker_versioned_pkg[docker_version] }}"
+      force: yes
 
 docker_repo_key_info:
   pkg_key: apt_key
@@ -5,7 +5,7 @@ docker_kernel_min_version: '0'
 docker_package_info:
   pkg_mgr: yum
   pkgs:
-    - docker-io
+    - name: docker-io
 
 docker_repo_key_info:
   pkg_key: ''
@@ -4,11 +4,13 @@ docker_versioned_pkg:
   latest: docker
   1.9: docker-1:1.9.1
   1.10: docker-1:1.10.1
+  1.11: docker-1:1.11.2
+  1.12: docker-1:1.12.1
 
 docker_package_info:
   pkg_mgr: dnf
   pkgs:
-    - "{{ docker_versioned_pkg[docker_version] }}"
+    - name: "{{ docker_versioned_pkg[docker_version] }}"
 
 docker_repo_key_info:
   pkg_key: ''
@@ -3,7 +3,7 @@ docker_kernel_min_version: '0'
 docker_package_info:
   pkg_mgr: yum
   pkgs:
-    - docker
+    - name: docker-engine
 
 docker_repo_key_info:
   pkg_key: ''
roles/docker/vars/ubuntu-16.04.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
---
docker_version: 1.11
docker_kernel_min_version: '3.2'

# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
docker_versioned_pkg:
  latest: docker-engine
  1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}

docker_package_info:
  pkg_mgr: apt
  pkgs:
    - name: "{{ docker_versioned_pkg[docker_version] }}"
      force: yes

docker_repo_key_info:
  pkg_key: apt_key
  keyserver: hkp://p80.pool.sks-keyservers.net:80
  repo_keys:
    - 58118E89F3A912897C070ADBF76221572C52609D

docker_repo_info:
  pkg_repo: apt_repository
  repos:
    - >
      deb https://apt.dockerproject.org/repo
      {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
      main
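Xenial defaults to Docker 1.11 here, but because `docker_package_info` resolves the package through the `docker_versioned_pkg` map, picking another pinned release is a one-line override. A hedged sketch (the group_vars placement is an assumption):

# group_vars/all.yml (hypothetical placement): select the 1.12 entry from the map above
docker_version: 1.12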
@@ -6,11 +6,14 @@ docker_versioned_pkg:
   latest: docker-engine
   1.9: docker-engine=1.9.0-0~{{ ansible_distribution_release|lower }}
   1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
+  1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
+  1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}
 
 docker_package_info:
   pkg_mgr: apt
   pkgs:
-    - "{{ docker_versioned_pkg[docker_version] }}"
+    - name: "{{ docker_versioned_pkg[docker_version] }}"
+      force: yes
 
 docker_repo_key_info:
   pkg_key: apt_key
@@ -1,45 +1,51 @@
 ---
 local_release_dir: /tmp
+
+# if this is set to true will only download files once
+download_run_once: False
 
 # Versions
-kube_version: v1.2.0
-etcd_version: v2.2.5
-calico_version: v0.17.0
-calico_cni_version: v1.0.0
-weave_version: v1.4.4
+kube_version: v1.4.0
+
+etcd_version: v3.0.6
+#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
+# after migration to container download
+calico_version: v0.20.0
+calico_cni_version: v1.4.2
+weave_version: v1.6.1
+flannel_version: 0.5.5
+flannel_server_helper_version: 0.1
 
 # Download URL's
-kubelet_download_url: "https://storage.googleapis.com/kubespray/{{kube_version}}_kubernetes-kubelet"
-apiserver_download_url: "https://storage.googleapis.com/kubespray/{{kube_version}}_kubernetes-apiserver"
-kubectl_download_url: "https://storage.googleapis.com/kubespray/{{kube_version}}_kubernetes-kubectl"
-etcd_download_url: "https://storage.googleapis.com/kubespray/{{etcd_version}}_etcd"
-calico_download_url: "https://storage.googleapis.com/kubespray/{{calico_version}}_calico"
-calico_cni_download_url: "https://storage.googleapis.com/kubespray/{{calico_cni_version}}_calico-cni-plugin"
-calico_cni_ipam_download_url: "https://storage.googleapis.com/kubespray/{{calico_cni_version}}_calico-cni-plugin-ipam"
-weave_download_url: "https://storage.googleapis.com/kubespray/{{weave_version}}_weave"
+etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
+calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
+calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
+weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
 
 # Checksums
-calico_checksum: "1fa22c0ee0cc661f56aa09169a3661fb46e552b53fae5fae9aac010e0666b281"
-calico_cni_checksum: "cfbb95d4416cb65845a188f3bd991fff232bd5ce3463b2919d586ab77967aecd"
-calico_cni_ipam_checksum: "93ebf8756b26314e1e3f612f1e824418cbb0a8df2942664422e697bcb109fbb2"
-weave_checksum: "152942c330f87ab475d87d9311b91674b90f25ea685bd4e04e0495d5fe09a957"
-etcd_checksum: "aa6037406257d2a1bc48ffa769afe7a4f8a04cc1ffcd36ef84f9ee8bc4eca756"
-kubectl_checksum: "0fd51875a4783fb106f769bdbc81012066b4a2785ba88b0280870a25cab76296"
-kubelet_checksum: "a1da4b8d0965f66b7243d22f2b307227ec24bbd7ce8522cd3ce4ec1206c3a09e"
-kube_apiserver_checksum: "fe50e4014a96897a708b3c847550b4e510a390585209c2b11c02a32123570d43"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
+weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
+etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
+
+# Containers
+# Possible values: host, docker
+etcd_deployment_type: "docker"
+etcd_image_repo: "quay.io/coreos/etcd"
+etcd_image_tag: "{{ etcd_version }}"
+flannel_server_helper_image_repo: "gcr.io/google_containers/flannel-server-helper"
+flannel_server_helper_image_tag: "{{ flannel_server_helper_version }}"
+flannel_image_repo: "quay.io/coreos/flannel"
+flannel_image_tag: "{{ flannel_version }}"
+calicoctl_image_repo: "calico/ctl"
+calicoctl_image_tag: "{{ calico_version }}"
+calico_node_image_repo: "calico/node"
+calico_node_image_tag: "{{ calico_version }}"
+hyperkube_image_repo: "quay.io/coreos/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}_coreos.0"
 
 downloads:
-  - name: calico
-    dest: calico/bin/calicoctl
-    version: "{{calico_version}}"
-    sha256: "{{ calico_checksum }}"
-    source_url: "{{ calico_download_url }}"
-    url: "{{ calico_download_url }}"
-    owner: "root"
-    mode: "0755"
-
-  - name: calico-cni-plugin
+  calico_cni_plugin:
     dest: calico/bin/calico
     version: "{{calico_cni_version}}"
     sha256: "{{ calico_cni_checksum }}"
@@ -47,8 +53,8 @@ downloads:
     url: "{{ calico_cni_download_url }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
-  - name: calico-cni-plugin-ipam
+  calico_cni_plugin_ipam:
     dest: calico/bin/calico-ipam
     version: "{{calico_cni_version}}"
     sha256: "{{ calico_cni_ipam_checksum }}"
@@ -56,8 +62,8 @@ downloads:
     url: "{{ calico_cni_ipam_download_url }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
-  - name: weave
+  weave:
    dest: weave/bin/weave
    version: "{{weave_version}}"
    source_url: "{{weave_download_url}}"
@@ -65,8 +71,8 @@ downloads:
     sha256: "{{ weave_checksum }}"
     owner: "root"
     mode: "0755"
+    enabled: "{{ kube_network_plugin == 'weave' }}"
-  - name: etcd
+  etcd:
     version: "{{etcd_version}}"
     dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
     sha256: "{{ etcd_checksum }}"
@@ -75,30 +81,44 @@ downloads:
     unarchive: true
     owner: "etcd"
     mode: "0755"
+    container: "{{ etcd_deployment_type == 'docker' }}"
+    repo: "{{ etcd_image_repo }}"
+    tag: "{{ etcd_image_tag }}"
+  hyperkube:
+    container: true
+    repo: "{{ hyperkube_image_repo }}"
+    tag: "{{ hyperkube_image_tag }}"
+  flannel:
+    container: true
+    repo: "{{ flannel_image_repo }}"
+    tag: "{{ flannel_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
+  flannel_server_helper:
+    container: true
+    repo: "{{ flannel_server_helper_image_repo }}"
+    tag: "{{ flannel_server_helper_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
+  calicoctl:
+    container: true
+    repo: "{{ calicoctl_image_repo }}"
+    tag: "{{ calicoctl_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
+  calico_node:
+    container: true
+    repo: "{{ calico_node_image_repo }}"
+    tag: "{{ calico_node_image_tag }}"
+    enabled: "{{ kube_network_plugin == 'calico' }}"
 
-  - name: kubernetes-kubelet
-    version: "{{kube_version}}"
-    dest: kubernetes/bin/kubelet
-    sha256: "{{kubelet_checksum}}"
-    source_url: "{{ kubelet_download_url }}"
-    url: "{{ kubelet_download_url }}"
-    owner: "kube"
-    mode: "0755"
-
-  - name: kubernetes-kubectl
-    dest: kubernetes/bin/kubectl
-    version: "{{kube_version}}"
-    sha256: "{{kubectl_checksum}}"
-    source_url: "{{ kubectl_download_url }}"
-    url: "{{ kubectl_download_url }}"
-    owner: "kube"
-    mode: "0755"
-
-  - name: kubernetes-apiserver
-    dest: kubernetes/bin/kube-apiserver
-    version: "{{kube_version}}"
-    sha256: "{{kube_apiserver_checksum}}"
-    source_url: "{{ apiserver_download_url }}"
-    url: "{{ apiserver_download_url }}"
-    owner: "kube"
-    mode: "0755"
+download:
+  container: "{{ file.container|default('false') }}"
+  repo: "{{ file.repo|default(None) }}"
+  tag: "{{ file.tag|default(None) }}"
+  enabled: "{{ file.enabled|default('true') }}"
+  dest: "{{ file.dest|default(None) }}"
+  version: "{{ file.version|default(None) }}"
+  sha256: "{{ file.sha256|default(None) }}"
+  source_url: "{{ file.source_url|default(None) }}"
+  url: "{{ file.url|default(None) }}"
+  unarchive: "{{ file.unarchive|default('false') }}"
+  owner: "{{ file.owner|default('kube') }}"
+  mode: "{{ file.mode|default(None) }}"
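Each entry in `downloads` is now a named dict rather than a list item; a consuming role selects one entry and passes it in as the `file` variable, which the `download` fact above normalizes with defaults. This is exactly how the etcd role wires it up later in this diff:

dependencies:
  - role: download
    file: "{{ downloads.etcd }}"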
@@ -1,32 +1,83 @@
 ---
+- name: downloading...
+  debug:
+    msg: "{{ download.url }}"
+  when: "{{ download.enabled|bool and not download.container|bool }}"
+
 - name: Create dest directories
-  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
-  with_items: downloads
+  file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
+  when: "{{ download.enabled|bool and not download.container|bool }}"
 
 - name: Download items
   get_url:
-    url: "{{item.url}}"
-    dest: "{{local_release_dir}}/{{item.dest}}"
-    sha256sum: "{{item.sha256 | default(omit)}}"
-    owner: "{{ item.owner|default(omit) }}"
-    mode: "{{ item.mode|default(omit) }}"
-  with_items: downloads
+    url: "{{download.url}}"
+    dest: "{{local_release_dir}}/{{download.dest}}"
+    sha256sum: "{{download.sha256 | default(omit)}}"
+    owner: "{{ download.owner|default(omit) }}"
+    mode: "{{ download.mode|default(omit) }}"
+  register: get_url_result
+  until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: "{{ download.enabled|bool and not download.container|bool }}"
 
 - name: Extract archives
   unarchive:
-    src: "{{ local_release_dir }}/{{item.dest}}"
-    dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
-    owner: "{{ item.owner|default(omit) }}"
-    mode: "{{ item.mode|default(omit) }}"
+    src: "{{ local_release_dir }}/{{download.dest}}"
+    dest: "{{ local_release_dir }}/{{download.dest|dirname}}"
+    owner: "{{ download.owner|default(omit) }}"
+    mode: "{{ download.mode|default(omit) }}"
     copy: no
-  when: "{{item.unarchive is defined and item.unarchive == True}}"
-  with_items: downloads
+  when: "{{ download.enabled|bool and not download.container|bool and download.unarchive is defined and download.unarchive == True }}"
 
 - name: Fix permissions
   file:
     state: file
-    path: "{{local_release_dir}}/{{item.dest}}"
-    owner: "{{ item.owner|default(omit) }}"
-    mode: "{{ item.mode|default(omit) }}"
-  when: "{{item.unarchive is not defined or item.unarchive == False}}"
-  with_items: downloads
+    path: "{{local_release_dir}}/{{download.dest}}"
+    owner: "{{ download.owner|default(omit) }}"
+    mode: "{{ download.mode|default(omit) }}"
+  when: "{{ download.enabled|bool and not download.container|bool and (download.unarchive is not defined or download.unarchive == False) }}"
+
+- name: pulling...
+  debug:
+    msg: "{{ download.repo }}:{{ download.tag }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+
+- name: Create dest directory for saved/loaded container images
+  file: path="{{local_release_dir}}/containers" state=directory recurse=yes
+  when: "{{ download.enabled|bool and download.container|bool }}"
+
+#NOTE(bogdando) this brings no docker-py deps for nodes
+- name: Download containers
+  command: "/usr/bin/docker pull {{ download.repo }}:{{ download.tag }}"
+  register: pull_task_result
+  until: pull_task_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+  delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
+  run_once: "{{ download_run_once|bool }}"
+
+- set_fact:
+    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"
+
+- name: Download | save container images
+  shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
+  delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
+  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool
+
+- name: Download | get container images
+  synchronize:
+    src: "{{ fname }}"
+    dest: "{{local_release_dir}}/containers"
+    mode: push
+  register: get_task
+  until: get_task|success
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
+
+- name: Download | load container images
+  shell: docker load < "{{ fname }}"
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
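With `download_run_once` enabled, each image is pulled and `docker save`d once on the first master, `synchronize`d (rsync over SSH) out to the other nodes, and `docker load`ed there, so every image crosses the slow link exactly once. A hedged inventory sketch (file placement and the `retry_stagger` value are assumptions):

# group_vars/all.yml (hypothetical placement)
download_run_once: True    # pull on groups['kube-master'][0], push tarballs to the rest
retry_stagger: 5           # integer seconds consumed by the retry delays above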
@@ -1,3 +1,2 @@
 ---
-etcd_version: v2.2.5
 etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
@@ -2,14 +2,36 @@
 - name: restart etcd
   command: /bin/true
   notify:
-    - reload systemd
+    - etcd | reload systemd
     - reload etcd
+    - wait for etcd up
 
-- name: reload systemd
+- name: restart etcd-proxy
+  command: /bin/true
+  notify:
+    - etcd | reload systemd
+    - reload etcd-proxy
+    - wait for etcd up
+
+- name: etcd | reload systemd
   command: systemctl daemon-reload
   when: ansible_service_mgr == "systemd"
 
+- name: wait for etcd up
+  uri: url="http://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+  register: result
+  until: result.status == 200
+  retries: 10
+  delay: 5
+
 - name: reload etcd
   service:
     name: etcd
     state: restarted
+  when: is_etcd_master
+
+- name: reload etcd-proxy
+  service:
+    name: etcd-proxy
+    state: restarted
+  when: is_etcd_proxy
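The `restart etcd` handler is a `/bin/true` no-op that only fans out to the real handlers, ending with a health poll, so a notified restart is not considered finished until the member answers on its client port. The same probe can be run stand-alone; a sketch, assuming a member answering on 127.0.0.1:2379:

- name: probe etcd health by hand (hypothetical task)
  uri:
    url: "http://127.0.0.1:2379/health"
  register: result
  until: result.status == 200
  retries: 10
  delay: 5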
roles/etcd/meta/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
dependencies:
  - role: adduser
    user: "{{ addusers.etcd }}"
    when: ansible_os_family != 'CoreOS'
  - role: docker
    when: (ansible_os_family != "CoreOS" and etcd_deployment_type == "docker" or inventory_hostname in groups['k8s-cluster'])
  - role: download
    file: "{{ downloads.etcd }}"
@@ -1,23 +1,44 @@
 ---
+- name: Configure | Check if member is in cluster
+  shell: "etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_member_in_cluster
+  ignore_errors: true
+  changed_when: false
+  when: is_etcd_master
+
+- name: Configure | Add member to the cluster if it is not there
+  when: is_etcd_master and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
+  shell: "etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+
 - name: Configure | Copy etcd.service systemd file
   template:
-    src: etcd.service.j2
+    src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
     backup: yes
-  when: ansible_service_mgr == "systemd"
+  when: ansible_service_mgr == "systemd" and is_etcd_master
   notify: restart etcd
 
 - name: Configure | Write etcd initd script
   template:
-    src: deb-etcd.initd.j2
+    src: "deb-etcd-{{ etcd_deployment_type }}.initd.j2"
     dest: /etc/init.d/etcd
     owner: root
     mode: 0755
-  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
+  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_master
   notify: restart etcd
 
-- name: Configure | Create etcd config file
+- name: Configure | Copy etcd-proxy.service systemd file
   template:
-    src: etcd.j2
-    dest: /etc/etcd.env
-  notify: restart etcd
+    src: "etcd-proxy-{{ etcd_deployment_type }}.service.j2"
+    dest: /etc/systemd/system/etcd-proxy.service
+    backup: yes
+  when: ansible_service_mgr == "systemd" and is_etcd_proxy
+  notify: restart etcd-proxy
+
+- name: Configure | Write etcd-proxy initd script
+  template:
+    src: "deb-etcd-proxy-{{ etcd_deployment_type }}.initd.j2"
+    dest: /etc/init.d/etcd-proxy
+    owner: root
+    mode: 0755
+  when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_proxy
+  notify: restart etcd-proxy
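`etcd_access_addresses` is expected to be a comma-separated list of client URLs (its definition lives outside this diff). For a hypothetical three-member cluster, the two probes above would expand to commands like:

etcdctl --no-sync --peers=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 member list | grep -q http://10.0.0.1:2379
etcdctl --peers=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 member add etcd1 http://10.0.0.1:2380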
@@ -1,9 +1,43 @@
 ---
-- name: Install | Copy etcd binary
+- name: Install | Copy etcd binary from downloaddir
   command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
+  when: etcd_deployment_type == "host"
   register: etcd_copy
   changed_when: false
 
-- name: Install | Copy etcdctl binary
+- name: Install | Copy etcdctl binary from downloaddir
   command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
+  when: etcd_deployment_type == "host"
   changed_when: false
+
+#Plan A: no docker-py deps
+- name: Install | Copy etcdctl binary from container
+  command: sh -c "/usr/bin/docker rm -f etcdctl-binarycopy;
+           /usr/bin/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
+           /usr/bin/docker cp etcdctl-binarycopy:{{ etcd_container_bin_dir }}etcdctl {{ bin_dir }}/etcdctl &&
+           /usr/bin/docker rm -f etcdctl-binarycopy"
+  when: etcd_deployment_type == "docker"
+  register: etcd_task_result
+  until: etcd_task_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  changed_when: false
+
+#Plan B: looks nicer, but requires docker-py on all hosts:
+#- name: Install | Set up etcd-binarycopy container
+#  docker:
+#    name: etcd-binarycopy
+#    state: present
+#    image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
+#  when: etcd_deployment_type == "docker"
+#
+#- name: Install | Copy etcdctl from etcd-binarycopy container
+#  command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl"
+#  when: etcd_deployment_type == "docker"
+#
+#- name: Install | Clean up etcd-binarycopy container
+#  docker:
+#    name: etcd-binarycopy
+#    state: absent
+#    image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
+#  when: etcd_deployment_type == "docker"
@@ -1,18 +1,38 @@
 ---
 - include: install.yml
+- include: set_cluster_health.yml
 - include: configure.yml
+- include: refresh_config.yml
 
-- name: Restart etcd if binary changed
-  command: /bin/true
-  notify: restart etcd
-  when: etcd_copy.stdout_lines
-
-# reload systemd before starting service
-- meta: flush_handlers
-
 - name: Ensure etcd is running
   service:
     name: etcd
     state: started
     enabled: yes
+  when: is_etcd_master
+
+- name: Ensure etcd-proxy is running
+  service:
+    name: etcd-proxy
+    state: started
+    enabled: yes
+  when: is_etcd_proxy
+
+- name: Restart etcd if binary changed
+  command: /bin/true
+  notify: restart etcd
+  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
+
+- name: Restart etcd-proxy if binary changed
+  command: /bin/true
+  notify: restart etcd-proxy
+  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_proxy
+
+# Reload systemd before starting service
+- meta: flush_handlers
+
+# After etcd cluster is assembled, make sure that
+# initial state of the cluster is in `existing`
+# state instead of `new`.
+- include: set_cluster_health.yml
+- include: refresh_config.yml
roles/etcd/tasks/refresh_config.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
- name: Refresh config | Create etcd config file
  template:
    src: etcd.j2
    dest: /etc/etcd.env
  notify: restart etcd
  when: is_etcd_master

- name: Refresh config | Create etcd-proxy config file
  template:
    src: etcd-proxy.j2
    dest: /etc/etcd-proxy.env
  notify: restart etcd-proxy
  when: is_etcd_proxy
roles/etcd/tasks/set_cluster_health.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Configure | Check if cluster is healthy
  shell: "etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
  register: etcd_cluster_is_healthy
  ignore_errors: true
  changed_when: false
  when: is_etcd_master
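The registered `etcd_cluster_is_healthy.rc` is consumed twice: configure.yml only runs `member add` when the cluster is already healthy, and etcd.j2 flips `ETCD_INITIAL_CLUSTER_STATE` between `new` and `existing` on it. The `ignore_errors` plus `changed_when: false` combination makes the probe side-effect-free; the idiom in isolation (command and names hypothetical):

- name: probe something without failing the play or reporting a change
  shell: "somecommand | grep -q 'healthy'"   # hypothetical command
  register: mycheck
  ignore_errors: true
  changed_when: false

- name: act only when the probe failed
  debug: msg="unhealthy, would bootstrap from scratch"
  when: mycheck.rc != 0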
roles/etcd/templates/deb-etcd-docker.initd.j2 (new file, 119 lines)
@@ -0,0 +1,119 @@
#!/bin/sh
set -a

### BEGIN INIT INFO
# Provides:          etcd
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: etcd distributed k/v store
# Description:
#          etcd is a distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
DAEMON_EXEC=`basename $DAEMON`
DAEMON_ARGS="run --restart=always --env-file=/etc/etcd.env \
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}"

SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd.pid

# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    {{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_member_name | default("etcd-proxy") }} &>/dev/null || true
    sleep 1
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
    RETVAL="$?"

    sleep 1
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if do_stop; then
      log_end_msg 0
    else
      log_failure_msg "Can't stop etcd"
      log_end_msg 1
    fi
    ;;
  status)
    if do_status; then
      log_end_msg 0
    else
      log_failure_msg "etcd is not running"
      log_end_msg 1
    fi
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    if do_stop; then
      if do_start; then
        log_end_msg 0
        exit 0
      else
        rc="$?"
      fi
    else
      rc="$?"
    fi
    log_failure_msg "Can't restart etcd"
    log_end_msg ${rc}
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@@ -16,11 +16,6 @@ PATH=/sbin:/usr/sbin:/bin:/usr/bin
 DESC="etcd k/v store"
 NAME=etcd
 DAEMON={{ bin_dir }}/etcd
-{% if inventory_hostname in groups['etcd'] %}
-DAEMON_ARGS=""
-{% else %}
-DAEMON_ARGS="-proxy on"
-{% endif %}
 SCRIPTNAME=/etc/init.d/$NAME
 DAEMON_USER=etcd
 STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
@@ -111,3 +106,4 @@ case "$1" in
     exit 3
     ;;
 esac
+
roles/etcd/templates/deb-etcd-proxy-docker.initd.j2 (new file, 120 lines)
@@ -0,0 +1,120 @@
#!/bin/sh
set -a

### BEGIN INIT INFO
# Provides:          etcd-proxy
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: etcd-proxy
# Description:
#          etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
DAEMON_EXEC=`basename $DAEMON`
DAEMON_ARGS="run --restart=always --env-file=/etc/etcd-proxy.env \
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}"


SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid

# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    {{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }} &>/dev/null || true
    sleep 1
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
    RETVAL="$?"

    sleep 1
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if do_stop; then
      log_end_msg 0
    else
      log_failure_msg "Can't stop etcd-proxy"
      log_end_msg 1
    fi
    ;;
  status)
    if do_status; then
      log_end_msg 0
    else
      log_failure_msg "etcd-proxy is not running"
      log_end_msg 1
    fi
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    if do_stop; then
      if do_start; then
        log_end_msg 0
        exit 0
      else
        rc="$?"
      fi
    else
      rc="$?"
    fi
    log_failure_msg "Can't restart etcd-proxy"
    log_end_msg ${rc}
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
roles/etcd/templates/deb-etcd-proxy-host.initd.j2 (new file, 110 lines)
@@ -0,0 +1,110 @@
#!/bin/sh
set -a

### BEGIN INIT INFO
# Provides:          etcd-proxy
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: etcd-proxy
# Description:
#          etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ bin_dir }}/etcd
DAEMON_ARGS=""
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid

# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -f /etc/etcd-proxy.env ] && . /etc/etcd-proxy.env

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
    RETVAL="$?"

    sleep 1
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    if do_stop; then
      log_end_msg 0
    else
      log_failure_msg "Can't stop etcd-proxy"
      log_end_msg 1
    fi
    ;;
  status)
    if do_status; then
      log_end_msg 0
    else
      log_failure_msg "etcd-proxy is not running"
      log_end_msg 1
    fi
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    if do_stop; then
      if do_start; then
        log_end_msg 0
        exit 0
      else
        rc="$?"
      fi
    else
      rc="$?"
    fi
    log_failure_msg "Can't restart etcd-proxy"
    log_end_msg ${rc}
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
roles/etcd/templates/etcd-docker.service.j2 (new file, 28 lines)
@@ -0,0 +1,28 @@
[Unit]
Description=etcd docker wrapper
Wants=docker.socket
After=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
--env-file=/etc/etcd.env \
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_member_name | default("etcd-proxy") }}
ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_member_name | default("etcd-proxy") }}
Restart=always
RestartSec=15s

[Install]
WantedBy=multi-user.target
@@ -1,15 +1,13 @@
 [Unit]
 Description=etcd
+After=network.target
 
 [Service]
+Type=notify
 User=etcd
 EnvironmentFile=/etc/etcd.env
-{% if inventory_hostname in groups['etcd'] %}
 ExecStart={{ bin_dir }}/etcd
-{% else %}
-ExecStart={{ bin_dir }}/etcd -proxy on
-{% endif %}
+NotifyAccess=all
 Restart=always
 RestartSec=10s
 LimitNOFILE=40000
roles/etcd/templates/etcd-proxy-docker.service.j2 (new file, 28 lines)
@@ -0,0 +1,28 @@
[Unit]
Description=etcd-proxy docker wrapper
Wants=docker.socket
After=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
--env-file=/etc/etcd-proxy.env \
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_proxy_member_name | default("etcd-proxy") }}
Restart=always
RestartSec=15s

[Install]
WantedBy=multi-user.target
roles/etcd/templates/etcd-proxy-host.service.j2 (new file, 19 lines)
@@ -0,0 +1,19 @@
[Unit]
Description=etcd-proxy
After=network.target

[Service]
Type=notify
User=etcd
PermissionsStartOnly=true
EnvironmentFile=/etc/etcd-proxy.env
ExecStart={{ bin_dir }}/etcd
ExecStartPre=/bin/mkdir -p /var/lib/etcd-proxy
ExecStartPre=/bin/chown -R etcd: /var/lib/etcd-proxy
NotifyAccess=all
Restart=always
RestartSec=10s
LimitNOFILE=40000

[Install]
WantedBy=multi-user.target
roles/etcd/templates/etcd-proxy.j2 (new file, 5 lines)
@@ -0,0 +1,5 @@
ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
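For illustration, on a proxy node with two etcd members whose `etcd_peer_url`s are http://10.0.0.1:2380 and http://10.0.0.2:2380, and a local `etcd_access_endpoint` of http://10.0.0.5:2379 (all values hypothetical), the template renders roughly to:

ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS=http://10.0.0.5:2379
ETCD_NAME=etcd-proxy
ETCD_INITIAL_CLUSTER=etcd1=http://10.0.0.1:2380,etcd2=http://10.0.0.2:2380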
@@ -1,17 +1,16 @@
-ETCD_DATA_DIR="/var/lib/etcd"
-{% if inventory_hostname in groups['etcd'] %}
-{% set etcd = {} %}
-{% for host in groups['etcd'] %}
-{% if inventory_hostname == host %}
-{% set _dummy = etcd.update({'name':"etcd"+loop.index|string}) %}
-{% endif %}
-{% endfor %}
-ETCD_ADVERTISE_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address)) }}:2379"
-ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address)) }}:2380"
-ETCD_INITIAL_CLUSTER_STATE="new"
-ETCD_INITIAL_CLUSTER_TOKEN="k8s_etcd"
-ETCD_LISTEN_PEER_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380"
-ETCD_NAME="{{ etcd.name }}"
-{% endif %}
-ETCD_INITIAL_CLUSTER="{% for host in groups['etcd'] %}etcd{{ loop.index|string }}=http://{{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
-ETCD_LISTEN_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"
+ETCD_DATA_DIR=/var/lib/etcd
+ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
+ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
+
+{% if not is_etcd_proxy %}
+ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379,http://127.0.0.1:2379
+{% else %}
+ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379
+{% endif %}
+ETCD_ELECTION_TIMEOUT=10000
+ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
+ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
+ETCD_NAME={{ etcd_member_name }}
+ETCD_PROXY=off
+ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
305
roles/kubernetes-apps/ansible/library/kube.py
Normal file
305
roles/kubernetes-apps/ansible/library/kube.py
Normal file
@@ -0,0 +1,305 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: kube
|
||||||
|
short_description: Manage Kubernetes Cluster
|
||||||
|
description:
|
||||||
|
- Create, replace, remove, and stop resources within a Kubernetes Cluster
|
||||||
|
version_added: "2.0"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The name associated with resource
|
||||||
|
filename:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The path and filename of the resource(s) definition file.
|
||||||
|
kubectl:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The path to the kubectl bin
|
||||||
|
namespace:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The namespace associated with the resource(s)
|
||||||
|
resource:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
|
||||||
|
label:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The labels used to filter specific resources.
|
||||||
|
server:
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
description:
|
||||||
|
- The url for the API server that commands are executed against.
|
||||||
|
force:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
description:
|
||||||
|
- A flag to indicate to force delete, replace, or stop.
|
||||||
|
all:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
description:
|
||||||
|
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
|
||||||
|
log_level:
|
||||||
|
required: false
|
||||||
|
default: 0
|
||||||
|
description:
|
||||||
|
- Indicates the level of verbosity of logging by kubectl.
|
||||||
|
state:
|
||||||
|
required: false
|
||||||
|
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
|
||||||
|
default: present
|
||||||
|
description:
|
||||||
|
- present handles checking existence or creating if definition file provided,
|
||||||
|
absent handles deleting resource(s) based on other options,
|
||||||
|
latest handles creating ore updating based on existence,
|
||||||
|
reloaded handles updating resource(s) definition using definition file,
|
||||||
|
stopped handles stopping resource(s) based on other options.
|
||||||
|
requirements:
|
||||||
|
- kubectl
|
||||||
|
author: "Kenny Jones (@kenjones-cisco)"
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: test nginx is present
|
||||||
|
kube: name=nginx resource=rc state=present
|
||||||
|
|
||||||
|
- name: test nginx is stopped
|
||||||
|
kube: name=nginx resource=rc state=stopped
|
||||||
|
|
||||||
|
- name: test nginx is absent
|
||||||
|
kube: name=nginx resource=rc state=absent
|
||||||
|
|
||||||
|
- name: test nginx is present
|
||||||
|
kube: filename=/tmp/nginx.yml
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class KubeManager(object):
|
||||||
|
|
||||||
|
def __init__(self, module):
|
||||||
|
|
||||||
|
self.module = module
|
||||||
|
|
||||||
|
self.kubectl = module.params.get('kubectl')
|
||||||
|
if self.kubectl is None:
|
||||||
|
self.kubectl = module.get_bin_path('kubectl', True)
|
||||||
|
self.base_cmd = [self.kubectl]
|
||||||
|
|
||||||
|
if module.params.get('server'):
|
||||||
|
self.base_cmd.append('--server=' + module.params.get('server'))
|
||||||
|
|
||||||
|
if module.params.get('log_level'):
|
||||||
|
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
|
||||||
|
|
||||||
|
if module.params.get('namespace'):
|
||||||
|
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
|
||||||
|
|
||||||
|
self.all = module.params.get('all')
|
||||||
|
self.force = module.params.get('force')
|
||||||
|
self.name = module.params.get('name')
|
||||||
|
self.filename = module.params.get('filename')
|
||||||
|
self.resource = module.params.get('resource')
|
||||||
|
self.label = module.params.get('label')
|
||||||
|
|
||||||
|
def _execute(self, cmd):
|
||||||
|
args = self.base_cmd + cmd
|
||||||
|
try:
|
||||||
|
rc, out, err = self.module.run_command(args)
|
||||||
|
if rc != 0:
|
||||||
|
self.module.fail_json(
|
||||||
|
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
|
||||||
|
except Exception as exc:
|
||||||
|
self.module.fail_json(
|
||||||
|
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
|
||||||
|
return out.splitlines()
|
||||||
|
|
||||||
|
def _execute_nofail(self, cmd):
|
||||||
|
args = self.base_cmd + cmd
|
||||||
|
rc, out, err = self.module.run_command(args)
|
||||||
|
if rc != 0:
|
||||||
|
return None
|
||||||
|
return out.splitlines()
|
||||||
|
|
||||||
|
def create(self, check=True):
|
||||||
|
if check and self.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
cmd = ['create']
|
||||||
|
|
||||||
|
if not self.filename:
|
||||||
|
self.module.fail_json(msg='filename required to create')
|
||||||
|
|
||||||
|
cmd.append('--filename=' + self.filename)
|
||||||
|
|
||||||
|
return self._execute(cmd)
|
||||||
|
|
||||||
|
def replace(self):
|
||||||
|
|
||||||
|
if not self.force and not self.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
cmd = ['replace']
|
||||||
|
|
||||||
|
if self.force:
|
||||||
|
cmd.append('--force')
|
||||||
|
|
||||||
|
if not self.filename:
|
||||||
|
self.module.fail_json(msg='filename required to reload')
|
||||||
|
|
||||||
|
cmd.append('--filename=' + self.filename)
|
||||||
|
|
||||||
|
return self._execute(cmd)
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
|
||||||
|
if not self.force and not self.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
cmd = ['delete']
|
||||||
|
|
||||||
|
if self.filename:
|
||||||
|
cmd.append('--filename=' + self.filename)
|
||||||
|
else:
|
||||||
|
if not self.resource:
|
||||||
|
self.module.fail_json(msg='resource required to delete without filename')
|
||||||
|
|
||||||
|
cmd.append(self.resource)
|
||||||
|
|
||||||
|
if self.name:
|
||||||
|
cmd.append(self.name)
|
||||||
|
|
||||||
|
if self.label:
|
||||||
|
cmd.append('--selector=' + self.label)
|
||||||
|
|
||||||
|
if self.all:
|
||||||
|
cmd.append('--all')
|
||||||
|
|
||||||
|
if self.force:
|
||||||
|
cmd.append('--ignore-not-found')
|
||||||
|
|
||||||
|
return self._execute(cmd)
|
||||||
|
|
||||||
|
def exists(self):
|
||||||
|
cmd = ['get']
|
||||||
|
|
||||||
|
if not self.resource:
|
||||||
|
return False
|
||||||
|
|
||||||
|
cmd.append(self.resource)
|
||||||
|
|
||||||
|
if self.name:
|
||||||
|
cmd.append(self.name)
|
||||||
|
|
||||||
|
cmd.append('--no-headers')
|
||||||
|
|
||||||
|
if self.label:
|
||||||
|
cmd.append('--selector=' + self.label)
|
||||||
|
|
||||||
|
if self.all:
|
||||||
|
cmd.append('--all-namespaces')
|
||||||
|
|
||||||
|
result = self._execute_nofail(cmd)
|
||||||
|
if not result:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
    def stop(self):
        # Note: 'kubectl stop' is deprecated upstream in favor of 'kubectl delete'.

        if not self.force and not self.exists():
            return []

        cmd = ['stop']

        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to stop without filename')

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')

        return self._execute(cmd)

def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(),
            filename=dict(),
            namespace=dict(),
            resource=dict(),
            label=dict(),
            server=dict(),
            kubectl=dict(),
            force=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
            )
        )

    changed = False

    manager = KubeManager(module)
    state = module.params.get('state')

    if state == 'present':
        result = manager.create()

    elif state == 'absent':
        result = manager.delete()

    elif state == 'reloaded':
        result = manager.replace()

    elif state == 'stopped':
        result = manager.stop()

    elif state == 'latest':
        # 'latest' force-replaces the resource if it already exists,
        # otherwise creates it from the manifest.
        if manager.exists():
            manager.force = True
            result = manager.replace()
        else:
            result = manager.create(check=False)

    else:
        module.fail_json(msg='Unrecognized state %s.' % state)

    if result:
        changed = True

    module.exit_json(changed=changed,
                     msg='success: %s' % (' '.join(result)))


# Import at the bottom per the (old-style) Ansible module convention.
from ansible.module_utils.basic import *  # noqa

if __name__ == '__main__':
    main()
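
The task files added below put this module to work. As a quick orientation, a minimal sketch of a task driving it; the resource name and manifest path here are illustrative only, not from this change:

# Minimal sketch; 'myapp' and its manifest path are hypothetical.
- name: Ensure myapp is deployed and up to date
  kube:
    name: myapp
    namespace: kube-system
    resource: rc
    filename: /etc/kubernetes/myapp.yml
    kubectl: "{{bin_dir}}/kubectl"
    state: latest      # replace the resource if it exists, create it otherwise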
roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml (Normal file)
@@ -0,0 +1,10 @@
- name: Write calico-policy-controller yaml
  template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
  when: inventory_hostname == groups['kube-master'][0]


- name: Start Calico policy controller
  kube:
    kubectl: "{{bin_dir}}/kubectl"
    filename: /etc/kubernetes/calico-policy-controller.yml
  when: inventory_hostname == groups['kube-master'][0]
roles/kubernetes-apps/ansible/tasks/main.yaml (Normal file, 23 lines)
@@ -0,0 +1,23 @@
---
- name: Kubernetes Apps | Lay Down KubeDNS Template
  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
  with_items:
    - {file: kubedns-rc.yml, type: rc}
    - {file: kubedns-svc.yml, type: svc}
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Start Resources
  kube:
    name: kubedns
    namespace: kube-system
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: /etc/kubernetes/{{item.item.file}}
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]


- include: tasks/calico-policy-controller.yml
  when: enable_network_policy is defined and enable_network_policy == True
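
The include above only fires when `enable_network_policy` is set. Turning it on would be done in inventory variables; a sketch, with the location assumed rather than taken from this diff:

# e.g. in inventory group_vars (location assumed, not part of this change)
enable_network_policy: true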
@@ -0,0 +1,40 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      kubernetes.io/cluster-service: "true"
      k8s-app: calico-policy
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        k8s-app: calico-policy
    spec:
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:latest
          env:
            - name: ETCD_ENDPOINTS
              value: "{{ etcd_endpoint }}"
            # Location of the Kubernetes API - this shouldn't need to be
            # changed so long as it is used in conjunction with
            # CONFIGURE_ETC_HOSTS="true".
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Configure /etc/hosts within the container to resolve
            # the kubernetes.default Service to the correct clusterIP
            # using the environment provided by the kubelet.
            # This removes the need for KubeDNS to resolve the Service.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
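
Since the controller runs with hostNetwork on the first master only, a quick way to confirm it came up is a read-only kubectl check mirroring the module's exists() logic. A hypothetical verification task, not part of this change:

- name: Check calico-policy-controller is running
  command: "{{bin_dir}}/kubectl --namespace=kube-system get rs calico-policy-controller --no-headers"
  register: calico_rs
  changed_when: false
  when: inventory_hostname == groups['kube-master'][0]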
roles/kubernetes-apps/ansible/templates/kubedns-rc.yml (Normal file, 100 lines)
@@ -0,0 +1,100 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    version: v19
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kubedns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kubedns
        version: v19
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain={{ dns_domain }}.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
        args:
        - --log-facility=-
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: gcr.io/google_containers/exechealthz-amd64:1.1
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 10m
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
        - -port=8080
        - -quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.
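
In this pod, kubedns answers on port 10053 and dnsmasq fronts it on port 53, so clients only ever see the standard DNS port. A hypothetical smoke-test task (not part of this diff) that resolves the apiserver service through the cluster DNS:

- name: Verify cluster DNS resolves the apiserver service
  command: nslookup kubernetes.default.svc.{{ dns_domain }} {{ skydns_server }}
  changed_when: false
  when: inventory_hostname == groups['kube-master'][0]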
roles/kubernetes-apps/ansible/templates/kubedns-svc.yml (Normal file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  name: kubedns
  namespace: kube-system
  labels:
    k8s-app: kubedns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "kubedns"
spec:
  selector:
    k8s-app: kubedns
  clusterIP: {{ skydns_server }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
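
The Service pins its clusterIP to `{{ skydns_server }}`, which must fall inside the cluster's service address range and match the address kubelets hand out as the cluster DNS server. Illustrative inventory values only; your inventory may differ, and kubespray derives these elsewhere:

# Illustrative values, not part of this diff
dns_domain: cluster.local
skydns_server: 10.233.0.3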