mirror of https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 13:54:37 +03:00

Compare commits: release-2. ... v2.11.1

322 Commits
| SHA1 |
|---|
| b0ccda8a42 |
| c8dad3f6c6 |
| 5ec9ab7ec0 |
| 73097aa39d |
| 86cc703c75 |
| 4dba34bd02 |
| b0437516c1 |
| da015e0249 |
| 554857da97 |
| 9bf23fa43b |
| 42287066d3 |
| a1ff1de975 |
| 1bfbc5bbc4 |
| c5b4d3ceaa |
| 8fc9c5d025 |
| 42bba66c02 |
| 53bc80bb59 |
| 771ce96e6d |
| fc456ff0cd |
| b4f70db878 |
| 5707f79b33 |
| 0a2f4edfc6 |
| 56fa46716e |
| b74abe56fd |
| 62aecd1e4a |
| 973afef96e |
| a235605d2c |
| 023108a733 |
| 75d1be8272 |
| a44235d11b |
| 7abf6a6958 |
| 0d0b1fdf82 |
| b710c72f04 |
| 678c316d01 |
| bc6de32faf |
| 7cf8ad4dc7 |
| 02ec72fa40 |
| d22634a597 |
| 4132cee687 |
| f3df0d5f4a |
| 1d285e654d |
| dc6ad64ec7 |
| 92bfcf0467 |
| 54b1fe83f3 |
| 5337cff179 |
| 1be788f785 |
| 8afbf339f7 |
| 8c935dfb50 |
| 66c5ed8406 |
| 4087e97505 |
| da50ed0936 |
| fbbfff3795 |
| fb9103acd3 |
| 49d921cf91 |
| fe29c97ae8 |
| 2abb6c8689 |
| a3ca441998 |
| 9cf503acb1 |
| 1cbdd7ed5c |
| 428e52e0d1 |
| 70dc222719 |
| 69f796f0c7 |
| 5826f0810c |
| de9443a694 |
| 99c5f7e013 |
| d9dedc2cd5 |
| 23ae6027ab |
| 781b5691c9 |
| fd9bbcb157 |
| e0410661fa |
| 8ef754678a |
| 161a8f55fa |
| 7481cc31e1 |
| b15b6e834f |
| 76640cf1a1 |
| 374ea7b81d |
| 46bef931e9 |
| a36e9ae690 |
| 728155a2a1 |
| cdf9a9f4fc |
| 29307740dd |
| a038d62644 |
| 20c7e31ea3 |
| 65065e7fdf |
| 352297cf8d |
| a67a50f9c0 |
| 324bc41097 |
| c81b443d93 |
| dc16ab92f4 |
| 53032a6695 |
| d90a5f291b |
| 3b7791501e |
| f2b8a3614d |
| e89b47c7ee |
| 2aa66eb12d |
| 4c8b93e5b9 |
| 216631bf02 |
| c7f3123e28 |
| f599c2a691 |
| bc7d1f36ea |
| 80fa294a31 |
| 465dfd68bc |
| 73f45fbe94 |
| d270678bda |
| de028814e5 |
| b5406b752d |
| 6025981ceb |
| 4348e78b24 |
| e2f9adc2ff |
| f67a24499b |
| 5c704552d8 |
| d83ea51101 |
| fa6027e8f0 |
| 2849191e67 |
| 0559eec681 |
| a3a7fe7c8e |
| 9b2d176617 |
| 7a3547e4d1 |
| e6fb686156 |
| 5e80603bbb |
| c8d95a1586 |
| 27a99e0a3f |
| 3cc351dff9 |
| 23c9071c30 |
| 14141ec137 |
| 5bec2edaf7 |
| f504d0ea99 |
| 3b7797b1a1 |
| aa63eb6196 |
| 23aa3e4638 |
| 56ae3bfec2 |
| 4d5c4a13cb |
| 69a8f91512 |
| fa791cc344 |
| 456f743470 |
| ab6f0012cc |
| 4afbf51d32 |
| d62684b617 |
| a8dfcbbfc7 |
| bbdc6210f5 |
| c7f6ed1495 |
| 818aa7aeb1 |
| 045acc724b |
| d540560619 |
| 797bfd85b0 |
| 07cb8ebef7 |
| 54416cabfd |
| 3617ae31f6 |
| 4f05d801c3 |
| 956afcb33f |
| 6347419233 |
| 0c7a50fe1e |
| 7423932510 |
| b41530ba5d |
| 29e916508c |
| b45f3f0004 |
| 2a5721b4d4 |
| e30a703c8e |
| 333f1a4a40 |
| 84b278021a |
| 1e470b0473 |
| 0ef3a7914c |
| a3fff1e438 |
| 4bc204925a |
| 5d9946184a |
| 5ba169a612 |
| 872b37f751 |
| 8485136f9a |
| ff1bc739f1 |
| 594a0e7f1b |
| 8e28ba38d2 |
| 73c2ff17dd |
| 13f225e6ae |
| 3f62492a15 |
| 5e3bd2dff1 |
| 787a9c74fa |
| 14749df6f3 |
| 2db2898112 |
| 3776000fc4 |
| f0572e59e7 |
| 6217184c7f |
| 044dcbaed0 |
| 8a5eae94ea |
| bf3c6aeed1 |
| f3fbf995ca |
| 03bded2b6b |
| d5c0829d61 |
| 00369303de |
| 1f1479c0a7 |
| e67f848abc |
| 560f50d3cd |
| 3f45122d0d |
| 50bdaa573c |
| 24b6698cc9 |
| 73885d3b9e |
| f29387316f |
| d6fd0d2aca |
| e814da1eec |
| e029a09345 |
| dcd9c9509b |
| 15eb7db36d |
| a5b46bfc8c |
| fbba259933 |
| 7b77e2d232 |
| 48a182844c |
| 9335cdcebc |
| 38af93b60c |
| 741de6051c |
| b8f0de3074 |
| 88d919337e |
| f518b90c6b |
| d5c33e6d6c |
| 338eb4ce65 |
| 009e208bcd |
| 81e6877b02 |
| 3722acee85 |
| a4a35f8a4f |
| 82119ca923 |
| 6ca2019002 |
| 53e3463b5a |
| c9ed5f69d7 |
| 696d481e3b |
| f5a83ceded |
| 3fe66a1298 |
| 6af1f65d3c |
| 4a10dca7d4 |
| 4d57ed314d |
| 86d0e12695 |
| 4e81bcc147 |
| 691baf5b14 |
| 6243467856 |
| 3c5a4474ac |
| 01da65252b |
| f3e7615bef |
| f47a666227 |
| b708db4cd5 |
| a3144e7e21 |
| 683efc5698 |
| 38a3075025 |
| fc072300ea |
| d25ecfe1c1 |
| 37d98e79ec |
| a65605b17a |
| 424e59805f |
| 6df8111cd4 |
| 76db060afb |
| d588532c9b |
| d6d7458d68 |
| 228b244c84 |
| d89ecb8308 |
| 50751bb610 |
| 64f48bf84c |
| f8fdc0cd93 |
| 09fe95bc60 |
| ada5941a70 |
| 88fe3403ce |
| 04f2682ac6 |
| 873b5608cf |
| 12086744e0 |
| 33ab615072 |
| f696d7abee |
| 5a1cf19278 |
| 416e65509b |
| 4de6a78e26 |
| 026088deea |
| f142e671b3 |
| 2f49b6caa8 |
| 50c86919dc |
| 781cc00cc4 |
| 05dc2b3a09 |
| d0e628911c |
| 656633f784 |
| 530e1c329d |
| f5aec8add4 |
| f92309bfd0 |
| ef10feb26f |
| c6586829de |
| b103385678 |
| 848191e97a |
| 04e3fb6a5a |
| b218e17f44 |
| bba6d0c613 |
| 49af1f9969 |
| a6dc50e7cb |
| f69b5f7f33 |
| 37eac010c8 |
| d4b9f15c0a |
| ec3daedf9e |
| 1cf76a10db |
| d83181a2be |
| b834a28891 |
| 78f6f6b889 |
| 0b02f6593b |
| 7f1d9ff543 |
| c5fb734098 |
| d5d3cfd3fa |
| cc77a8c395 |
| d39c273d96 |
| 316508626d |
| 46ba6a4154 |
| d8cbbc414e |
| ebae491e3f |
| 6f919e5020 |
| 4ff851b302 |
| 3af90f8772 |
| cb54d074b5 |
| 9032e271f1 |
| 15597aa493 |
| 3b9d13fda9 |
| 5e0249ae7c |
| 27958e4247 |
| 353afa7cb0 |
| e865c50574 |
| a30ad1e5a5 |
| 586ad89d50 |
| 6caa639243 |
| 80f31818df |
| 854cc53fa5 |
| d2a1ac3b0c |
| a678d1be9d |
| 097806dfe8 |
| 7cdf1fd388 |
.ansible-lint (Normal file, 25 lines)
@@ -0,0 +1,25 @@
---
parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
  # The following rules throw errors.
  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
  - '301'
  - '305'
  - '306'
  - '404'
  - '503'

  # These rules are intentionally skipped:
  #
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'
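As an aside (not part of the diff): ansible-lint picks this `.ansible-lint` file up automatically from the directory it runs in, so every rule ID under skip_list is suppressed repo-wide. A minimal sketch of a task that rule 301 would otherwise flag; the task is hypothetical, not from the repository:

```yaml
# Hypothetical task, not from the repo. ansible-lint rule 301
# ("Commands should not change things if nothing needs doing") would normally
# flag this bare `command`, because nothing tells Ansible when it changes state.
# With '301' in skip_list above, the finding is suppressed.
- name: Read the kernel release
  command: uname -r
  register: kernel_release
  # changed_when: false   # the usual fix once rule 301 is re-enabled
```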
@@ -1,16 +1,11 @@
-<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
-
-**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
-
+---
+name: Bug Report
+about: Report a bug encountered while operating Kubernetes
+labels: kind/bug
+
+---
 <!--
-If this is a BUG REPORT, please:
-- Fill in as much of the template below as you can. If you leave out
-information, we can't help you as well.
-
-If this is a FEATURE REQUEST, please:
-- Describe *in detail* the feature/behavior/change you'd like to see.
-
-In both cases, be ready for followup questions, and please respond in a timely
+Please, be ready for followup questions, and please respond in a timely
 manner. If we can't reproduce a bug or think a feature already exists, we
 might close your issue. If we're wrong, PLEASE feel free to reopen it and
 explain why.
.github/ISSUE_TEMPLATE/enhancement.md (vendored, Normal file, 11 lines)
@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature

---
<!-- Please only use this template for submitting enhancement requests -->

**What would you like to be added**:

**Why is this needed**:
.github/ISSUE_TEMPLATE/failing-test.md (vendored, Normal file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---

<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->

**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:
.github/ISSUE_TEMPLATE/support.md (vendored, Normal file, 18 lines)
@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: triage/support

---

<!--
STOP -- PLEASE READ!

GitHub is not the right place for support requests.

If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).

You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.

If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->
.github/PULL_REQUEST_TEMPLATE.md (vendored, Normal file, 44 lines)
@@ -0,0 +1,44 @@
<!-- Thanks for sending a pull request! Here are some tips for you:

1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
-->

**What type of PR is this?**
> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:
>
> /kind api-change
> /kind bug
> /kind cleanup
> /kind design
> /kind documentation
> /kind failing-test
> /kind feature
> /kind flake

**What this PR does / why we need it**:

**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
_If PR is about `failing-tests or flakes`, please post the related issues/tests in a comment and do not use `Fixes`_*
-->
Fixes #

**Special notes for your reviewer**:

**Does this PR introduce a user-facing change?**:
<!--
If no, just write "NONE" in the release-note block below.
If yes, a release note is required:
Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
-->
```release-note

```
.gitlab-ci.yml (786 lines)
@@ -1,9 +1,10 @@
 ---
 stages:
   - unit-tests
-  - moderator
   - deploy-part1
+  - moderator
   - deploy-part2
+  - deploy-gce
   - deploy-special

 variables:
@@ -27,785 +28,44 @@ variables:
   UPGRADE_TEST: "false"
   LOG_LEVEL: "-vv"

-# asia-east1-a
-# asia-northeast1-a
-# europe-west1-b
-# us-central1-a
-# us-east1-b
-# us-west1-a
-
 before_script:
+  - ./tests/scripts/rebase.sh
   - /usr/bin/python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh

 .job: &job
   tags:
-    - kubernetes
-    - docker
-  image: quay.io/kubespray/kubespray:v2.8
+    - packet
+  variables:
+    KUBESPRAY_VERSION: v2.10.0
+  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION

-.docker_service: &docker_service
-  services:
-    - docker:dind
-
-.create_cluster: &create_cluster
-  <<: *job
-  <<: *docker_service
-
-.gce_variables: &gce_variables
-  GCE_USER: travis
-  SSH_USER: $GCE_USER
-  CLOUD_MACHINE_TYPE: "g1-small"
-  CI_PLATFORM: "gce"
-  PRIVATE_KEY: $GCE_PRIVATE_KEY
-
-.do_variables: &do_variables
-  PRIVATE_KEY: $DO_PRIVATE_KEY
-  CI_PLATFORM: "do"
-  SSH_USER: root
-
 .testcases: &testcases
   <<: *job
-  <<: *docker_service
-  cache:
-    key: "$CI_BUILD_REF_NAME"
-    paths:
-      - downloads/
-      - $HOME/.cache
+  services:
+    - docker:dind
   before_script:
-    - docker info
-    - /usr/bin/python -m pip install -r requirements.txt
-    - /usr/bin/python -m pip install -r tests/requirements.txt
-    - mkdir -p /.ssh
-    - mkdir -p $HOME/.ssh
-    - ansible-playbook --version
-    - export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
-    - echo "CI_JOB_NAME is $CI_JOB_NAME"
-    - echo "PYPATH is $PYPATH"
+    - ./tests/scripts/rebase.sh
+    - ./tests/scripts/testcases_prepare.sh
   script:
-    - pwd
-    - ls
-    - echo ${PWD}
-    - echo "${STARTUP_SCRIPT}"
-    - cd tests && make create-${CI_PLATFORM} -s ; cd -
-
-    # Check out latest tag if testing upgrade
-    - test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-    # Checkout the CI vars file so it is available
-    - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
-    # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
-    - 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
-
-    # Create cluster
-    - >
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      -e ansible_ssh_user=${SSH_USER}
-      -e local_release_dir=${PWD}/downloads
-      --limit "all:!fake_hosts"
-      cluster.yml
-
-    # Repeat deployment if testing upgrade
-    - >
-      if [ "${UPGRADE_TEST}" != "false" ]; then
-      test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
-      test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
-      git checkout "${CI_BUILD_REF}";
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      -e ansible_ssh_user=${SSH_USER}
-      -e local_release_dir=${PWD}/downloads
-      --limit "all:!fake_hosts"
-      $PLAYBOOK;
-      fi
-
-    # Tests Cases
-    ## Test Master API
-    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-
-    ## Ping the between 2 pod
-    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
-
-    ## Advanced DNS checks
-    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
-
-    ## Idempotency checks 1/5 (repeat deployment)
-    - >
-      if [ "${IDEMPOT_CHECK}" = "true" ]; then
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
-      -e local_release_dir=${PWD}/downloads
-      --limit "all:!fake_hosts"
-      cluster.yml;
-      fi
-
-    ## Idempotency checks 2/5 (Advanced DNS checks)
-    - >
-      if [ "${IDEMPOT_CHECK}" = "true" ]; then
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      --limit "all:!fake_hosts"
-      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
-      fi
-
-    ## Idempotency checks 3/5 (reset deployment)
-    - >
-      if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
-      -e reset_confirmation=yes
-      --limit "all:!fake_hosts"
-      reset.yml;
-      fi
-
-    ## Idempotency checks 4/5 (redeploy after reset)
-    - >
-      if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook
-      -i ${ANSIBLE_INVENTORY}
-      -b --become-user=root
-      --private-key=${HOME}/.ssh/id_rsa
-      -u $SSH_USER
-      ${SSH_ARGS}
-      ${LOG_LEVEL}
-      -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
-      -e local_release_dir=${PWD}/downloads
-      --limit "all:!fake_hosts"
-      cluster.yml;
-      fi
-
-    ## Idempotency checks 5/5 (Advanced DNS checks)
-    - >
-      if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
-      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
-      --limit "all:!fake_hosts"
-      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
-      fi
-
+    - ./tests/scripts/testcases_run.sh
   after_script:
-    - cd tests && make delete-${CI_PLATFORM} -s ; cd -
-
-.gce: &gce
-  <<: *testcases
-  variables:
-    <<: *gce_variables
-
-.do: &do
-  variables:
-    <<: *do_variables
-  <<: *testcases
-
-# Test matrix. Leave the comments for markup scripts.
-.coreos_calico_aio_variables: &coreos_calico_aio_variables
-  # stage: deploy-part1
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
-  # stage: deploy-part1
-  MOVED_TO_GROUP_VARS: "true"
-
-.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
-  # stage: deploy-part1
-  UPGRADE_TEST: "graceful"
-
-.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
-  # stage: deploy-part1
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.coreos_cilium_variables: &coreos_cilium_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.rhel7_weave_variables: &rhel7_weave_variables
-  # stage: deploy-part1
-  MOVED_TO_GROUP_VARS: "true"
-
-.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
-  # stage: deploy-part2
-  MOVED_TO_GROUP_VARS: "true"
-
-.debian9_calico_variables: &debian9_calico_variables
-  # stage: deploy-part2
-  MOVED_TO_GROUP_VARS: "true"
-
-.coreos_canal_variables: &coreos_canal_variables
-  # stage: deploy-part2
-  MOVED_TO_GROUP_VARS: "true"
-
-.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.centos7_calico_ha_variables: &centos7_calico_ha_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.centos7_kube_router_variables: &centos7_kube_router_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.centos7_multus_calico_variables: &centos7_multus_calico_variables
-  # stage: deploy-part2
-  UPGRADE_TEST: "graceful"
-
-.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.coreos_kube_router_variables: &coreos_kube_router_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
-  # stage: deploy-part1
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_flannel_variables: &ubuntu_flannel_variables
-  # stage: deploy-part2
-  MOVED_TO_GROUP_VARS: "true"
-
-.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
-  # stage: deploy-special
-  MOVED_TO_GROUP_VARS: "true"
-
-.opensuse_canal_variables: &opensuse_canal_variables
-  # stage: deploy-part2
-  MOVED_TO_GROUP_VARS: "true"
-
-# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
-### PR JOBS PART1
-
-gce_ubuntu18-flannel-aio:
-  stage: deploy-part1
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *ubuntu18_flannel_aio_variables
-    <<: *gce_variables
-  when: on_success
-  except: ['triggers']
-  only: [/^pr-.*$/]
-
-### PR JOBS PART2
-
-gce_coreos-calico-aio:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *coreos_calico_aio_variables
-    <<: *gce_variables
-  when: on_success
-  except: ['triggers']
-  only: [/^pr-.*$/]
-
-gce_centos7-flannel-addons:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_flannel_addons_variables
-  when: on_success
-  except: ['triggers']
-  only: [/^pr-.*$/]
-
-### MANUAL JOBS
-
-gce_centos-weave-kubeadm-sep:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos_weave_kubeadm_variables
-  when: on_success
-  only: ['triggers']
-
-gce_ubuntu-weave-sep:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_weave_sep_variables
-  when: manual
-  only: ['triggers']
-
-gce_coreos-calico-sep-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_calico_aio_variables
-  when: on_success
-  only: ['triggers']
-
-gce_ubuntu-canal-ha-triggers:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
-  when: on_success
-  only: ['triggers']
-
-gce_centos7-flannel-addons-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_flannel_addons_variables
-  when: on_success
-  only: ['triggers']
-
-gce_ubuntu-weave-sep-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_weave_sep_variables
-  when: on_success
-  only: ['triggers']
-
-# More builds for PRs/merges (manual) and triggers (auto)
-do_ubuntu-canal-ha:
-  stage: deploy-part2
-  <<: *job
-  <<: *do
-  variables:
-    <<: *do_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-canal-ha:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-canal-kubeadm:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_canal_kubeadm_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-canal-kubeadm-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_canal_kubeadm_variables
-  when: on_success
-  only: ['triggers']
-
-gce_ubuntu-flannel-ha:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_flannel_variables
-  when: manual
-  except: ['triggers']
-
-gce_centos-weave-kubeadm-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos_weave_kubeadm_variables
-  when: on_success
-  only: ['triggers']
-
-gce_ubuntu-contiv-sep:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_contiv_sep_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_coreos-cilium:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_cilium_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-cilium-sep:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_cilium_sep_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_rhel7-weave:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *rhel7_weave_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_rhel7-weave-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *rhel7_weave_variables
-  when: on_success
-  only: ['triggers']
-
-gce_debian9-calico-upgrade:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *debian9_calico_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_debian9-calico-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *debian9_calico_variables
-  when: on_success
-  only: ['triggers']
-
-gce_coreos-canal:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_canal_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_coreos-canal-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_canal_variables
-  when: on_success
-  only: ['triggers']
-
-gce_rhel7-canal-sep:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *rhel7_canal_sep_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_rhel7-canal-sep-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *rhel7_canal_sep_variables
-  when: on_success
-  only: ['triggers']
-
-gce_centos7-calico-ha:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_calico_ha_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_centos7-calico-ha-triggers:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_calico_ha_variables
-  when: on_success
-  only: ['triggers']
-
-gce_centos7-kube-router:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_kube_router_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_centos7-multus-calico:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos7_multus_calico_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_opensuse-canal:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *opensuse_canal_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
-gce_coreos-alpha-weave-ha:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_alpha_weave_ha_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_coreos-kube-router:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_kube_router_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-rkt-sep:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_rkt_sep_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-kube-router-sep:
-  stage: deploy-special
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_kube_router_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
+# For failfast, at least 1 job must be defined in .gitlab-ci.yml
 # Premoderated with manual actions
 ci-authorized:
-  <<: *job
+  extends: .job
   stage: moderator
-  before_script:
-    - apt-get -y install jq
   script:
     - /bin/sh scripts/premoderator.sh
   except: ['triggers', 'master']
+  # Disable ci moderator
+  only: []

-syntax-check:
-  <<: *job
-  stage: unit-tests
-  script:
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
-  except: ['triggers', 'master']
+include:
+  - .gitlab-ci/lint.yml
+  - .gitlab-ci/shellcheck.yml
+  - .gitlab-ci/digital-ocean.yml
+  - .gitlab-ci/terraform.yml
+  - .gitlab-ci/packet.yml

-yamllint:
-  <<: *job
-  stage: unit-tests
-  script:
-    - yamllint .
-  except: ['triggers', 'master']
-
-tox-inventory-builder:
-  stage: unit-tests
-  <<: *job
-  script:
-    - pip install tox
-    - cd contrib/inventory_builder && tox
-  when: manual
-  except: ['triggers', 'master']
-
-# Tests for contrib/terraform/
-.terraform_install: &terraform_install
-  <<: *job
-  before_script:
-    # Set Ansible config
-    - cp ansible.cfg ~/.ansible.cfg
-    # Install Terraform
-    - apt-get install -y unzip
-    - curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
-    - unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
-    # Prepare inventory
-    - cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
-    - cd inventory/$CLUSTER
-    - ln -s ../../contrib/terraform/$PROVIDER/hosts
-    - terraform init ../../contrib/terraform/$PROVIDER
-    # Copy SSH keypair
-    - mkdir -p ~/.ssh
-    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
-    - chmod 400 ~/.ssh/id_rsa
-    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
-    - export TF_VAR_public_key_path=""
-  only: ['master', /^pr-.*$/]
-
-.terraform_validate: &terraform_validate
-  <<: *terraform_install
-  stage: unit-tests
-  script:
-    - terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
-    - terraform fmt -check -diff ../../contrib/terraform/$PROVIDER
-
-.terraform_apply: &terraform_apply
-  <<: *terraform_install
-  stage: deploy-part2
-  when: manual
-  script:
-    - terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
-    - ansible-playbook -i hosts ../../cluster.yml
-  after_script:
-    # Cleanup regardless of exit code
-    - cd inventory/$CLUSTER
-    - terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER
-
-tf-validate-openstack:
-  <<: *terraform_validate
-  variables:
-    TF_VERSION: 0.11.11
-    PROVIDER: openstack
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-validate-packet:
-  <<: *terraform_validate
-  variables:
-    TF_VERSION: 0.11.11
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-apply-packet:
-  <<: *terraform_apply
-  variables:
-    TF_VERSION: 0.11.11
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-    TF_VAR_cluster_name: $CI_COMMIT_REF_NAME
-    TF_VAR_number_of_k8s_masters: "1"
-    TF_VAR_number_of_k8s_nodes: "1"
-    TF_VAR_plan_k8s_masters: t1.small.x86
-    TF_VAR_plan_k8s_nodes: t1.small.x86
-    TF_VAR_facility: "ewr1"
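One pattern worth noting in the diff above (an aside, not part of the change itself): `ci-authorized` switches from the YAML merge key `<<: *job` to GitLab's `extends: .job`. The distinction matters because the rest of the pipeline moves into `include:`d files, and YAML anchors cannot cross file boundaries while `extends` can. A minimal sketch with hypothetical job names:

```yaml
# Hypothetical jobs illustrating the two template mechanisms.
.job: &job              # hidden template; &job also makes it a YAML anchor
  tags:
    - packet

anchor-style-job:       # YAML merge key: resolved by the YAML parser, same file only
  <<: *job
  script:
    - echo "anchor"

extends-style-job:      # GitLab `extends`: resolved by GitLab, also works across include:d files
  extends: .job
  script:
    - echo "extends"
```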
.gitlab-ci/digital-ocean.yml (Normal file, 19 lines)
@@ -0,0 +1,19 @@
---
.do_variables: &do_variables
  PRIVATE_KEY: $DO_PRIVATE_KEY
  CI_PLATFORM: "do"
  SSH_USER: root

.do: &do
  extends: .testcases
  tags:
    - do

do_ubuntu-canal-ha:
  stage: deploy-part2
  extends: .do
  variables:
    <<: *do_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
.gitlab-ci/gce.yml (Normal file, 247 lines)
@@ -0,0 +1,247 @@
---
.gce_variables: &gce_variables
  GCE_USER: travis
  SSH_USER: $GCE_USER
  CLOUD_MACHINE_TYPE: "g1-small"
  CI_PLATFORM: "gce"
  PRIVATE_KEY: $GCE_PRIVATE_KEY

.cache: &cache
  cache:
    key: "$CI_BUILD_REF_NAME"
    paths:
      - downloads/
      - $HOME/.cache

.gce: &gce
  extends: .testcases
  <<: *cache
  variables:
    <<: *gce_variables
  tags:
    - gce
  except: ['triggers']
  only: [/^pr-.*$/]

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
  # stage: deploy-part1
  UPGRADE_TEST: "graceful"

.centos7_multus_calico_variables: &centos7_multus_calico_variables
  # stage: deploy-gce
  UPGRADE_TEST: "graceful"

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1

gce_ubuntu18-flannel-aio:
  stage: deploy-part1
  <<: *gce
  when: manual

### PR JOBS PART2

gce_coreos-calico-aio:
  stage: deploy-gce
  <<: *gce
  when: on_success

gce_centos7-flannel-addons:
  stage: deploy-gce
  <<: *gce
  when: manual

### MANUAL JOBS

gce_centos-weave-kubeadm-sep:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep:
  stage: deploy-gce
  <<: *gce
  when: manual
  only: ['triggers']
  except: []

gce_coreos-calico-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-canal-ha-triggers:
  stage: deploy-special
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-flannel-addons-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

# More builds for PRs/merges (manual) and triggers (auto)


gce_ubuntu-canal-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-flannel-ha:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_centos-weave-kubeadm-triggers:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-contiv-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-cilium:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-weave:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_rhel7-weave-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_debian9-calico-upgrade:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_debian9-calico-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_coreos-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_coreos-canal-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_rhel7-canal-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-canal-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-calico-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-calico-ha-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-multus-calico:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos7_multus_calico_variables
  when: manual

gce_oracle-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_opensuse-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-kube-router-sep:
  stage: deploy-special
  <<: *gce
  when: manual
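A side note on the trigger jobs above: each pairs `only: ['triggers']` with an empty `except: []`. Keys written directly in a job body take precedence over keys pulled in through the `<<: *gce` merge, so both of the template's defaults (`only: [/^pr-.*$/]` and `except: ['triggers']`) get replaced, flipping the job from PR-only to trigger-only. A minimal sketch with a hypothetical job name:

```yaml
# Hypothetical job, not from the file.
gce_example-triggers:
  stage: deploy-gce
  <<: *gce            # brings in only: [/^pr-.*$/] and except: ['triggers']
  when: on_success
  only: ['triggers']  # overrides the merged-in `only`
  except: []          # clears the merged-in `except`
```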
.gitlab-ci/lint.yml (Normal file, 49 lines)
@@ -0,0 +1,49 @@
---
yamllint:
  extends: .job
  stage: unit-tests
  script:
    - yamllint --strict .
  except: ['triggers', 'master']

vagrant-validate:
  extends: .job
  stage: unit-tests
  script:
    - curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
    - dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
    - vagrant validate --ignore-provider
  except: ['triggers', 'master']

ansible-lint:
  extends: .job
  stage: unit-tests
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

syntax-check:
  extends: .job
  stage: unit-tests
  variables:
    ANSIBLE_INVENTORY: inventory/local-tests.cfg
    ANSIBLE_REMOTE_USER: root
    ANSIBLE_BECOME: "true"
    ANSIBLE_BECOME_USER: root
    ANSIBLE_VERBOSITY: "3"
  script:
    - ansible-playbook --syntax-check cluster.yml
    - ansible-playbook --syntax-check upgrade-cluster.yml
    - ansible-playbook --syntax-check reset.yml
    - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  extends: .job
  script:
    - pip install tox
    - cd contrib/inventory_builder && tox
  when: manual
  except: ['triggers', 'master']
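The ansible-lint job above selects its targets by grepping for play headers instead of linting every YAML file in the tree. A minimal sketch of a file its grep would match (hypothetical playbook, not from the repository):

```yaml
# Starts with '- hosts:', so the grep pipeline hands this file to ansible-lint.
- hosts: all
  tasks:
    - name: Ping all hosts
      ping:
```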
.gitlab-ci/packet.yml (Normal file, 122 lines)
@@ -0,0 +1,122 @@
---
.packet_variables: &packet_variables
  CI_PLATFORM: "packet"
  SSH_USER: "kubespray"

.packet: &packet
  extends: .testcases
  variables:
    <<: *packet_variables
  tags:
    - packet
  only: [/^pr-.*$/]
  except: ['triggers']

.test-upgrade: &test-upgrade
  variables:
    UPGRADE_TEST: "graceful"

packet_ubuntu18-calico-aio:
  stage: deploy-part1
  <<: *packet
  when: on_success

# ### PR JOBS PART2

packet_centos7-flannel-addons:
  stage: deploy-part2
  <<: *packet
  when: on_success

# ### MANUAL JOBS

packet_centos-weave-kubeadm-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success
  only: ['triggers']
  except: []

packet_ubuntu-weave-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
  only: ['triggers']
  except: []

# # More builds for PRs/merges (manual) and triggers (auto)

packet_ubuntu-canal-ha:
  stage: deploy-special
  <<: *packet
  when: manual

packet_ubuntu-canal-kubeadm:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-contiv-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *packet
  when: manual

packet_ubuntu18-flannel-containerd:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_debian9-macvlan-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_debian9-calico-upgrade:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-calico-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_centos7-kube-ovn:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-kube-router:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-multus-calico:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_opensuse-canal:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_oracle-7-canal:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-kube-router-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
.gitlab-ci/shellcheck.yml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
---
shellcheck:
  extends: .job
  stage: unit-tests
  variables:
    SHELLCHECK_VERSION: v0.6.0
  before_script:
    - ./tests/scripts/rebase.sh
    - curl --silent "https://storage.googleapis.com/shellcheck/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
162
.gitlab-ci/terraform.yml
Normal file
162
.gitlab-ci/terraform.yml
Normal file
@@ -0,0 +1,162 @@
---
# Tests for contrib/terraform/
.terraform_install:
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
    - ./tests/scripts/terraform_install.sh
    # Set Ansible config
    - cp ansible.cfg ~/.ansible.cfg
    # Prepare inventory
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub

.terraform_validate:
  extends: .terraform_install
  stage: unit-tests
  only: ['master', /^pr-.*$/]
  script:
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
  stage: deploy-part2
  when: manual
  only: [/^pr-.*$/]
  variables:
    ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
    ANSIBLE_INVENTORY: hosts
    CI_PLATFORM: tf
    TF_VAR_ssh_user: $SSH_USER
    TF_VAR_cluster_name: $CI_JOB_ID
  script:
    - tests/scripts/testcases_run.sh
  after_script:
    # Cleanup regardless of exit code
    - ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-packet-ubuntu16-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ewr1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_16_04

tf-packet-ubuntu18-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ams1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3
  OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
  OS_PROJECT_NAME: "9361447987648822"
  OS_USER_DOMAIN_NAME: Default
  OS_PROJECT_DOMAIN_ID: default
  OS_USERNAME: 8XuhBMfkKVrk
  OS_REGION_NAME: UK1
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"

tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229"  # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

tf-ovh_coreos-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: core
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac"  # b2-7
    TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac"  # b2-7
    TF_VAR_image: "CoreOS Stable"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
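The validate half of this pipeline is easy to reproduce locally; the sketch below mirrors the `.terraform_validate` steps for one provider, assuming a terraform binary matching the job's TF_VERSION is on PATH (packet is used here only as an example value for PROVIDER).

```sh
# Local equivalent of the .terraform_validate job for one provider
PROVIDER=packet
if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
cp "contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE" .
terraform init "contrib/terraform/$PROVIDER"
terraform validate -var-file="$VARIABLEFILE" "contrib/terraform/$PROVIDER"
terraform fmt -check -diff "contrib/terraform/$PROVIDER"
```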
11  Dockerfile
@@ -1,11 +1,11 @@
-FROM ubuntu:16.04
+FROM ubuntu:18.04

 RUN mkdir /kubespray
 WORKDIR /kubespray
 RUN apt update -y && \
     apt install -y \
-    libssl-dev python-dev sshpass apt-transport-https jq \
-    ca-certificates curl gnupg2 software-properties-common python-pip
+    libssl-dev python3-dev sshpass apt-transport-https jq \
+    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
     add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
@@ -13,7 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
     stable" \
     && apt update -y && apt-get install docker-ce -y
 COPY . .
-RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
+RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.11.3/bin/linux/amd64/kubectl \
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
     && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
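The move to Ubuntu 18.04, Python 3 and kubectl v1.14.4 can be smoke-tested by building the image locally; a small sketch, where `kubespray-test` is an arbitrary local tag, not a name from the repo.

```sh
# Build the updated image and confirm the Python 3 toolchain and the
# pinned kubectl client version inside it
docker build -t kubespray-test .
docker run --rm kubespray-test python3 --version
docker run --rm kubespray-test kubectl version --client
```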
@@ -18,3 +18,4 @@ aliases:
 - chapsuk
 - mirwan
 - miouge1
+- holmsten
49  README.md
@@ -29,17 +29,17 @@ To deploy the cluster you can use :

 # Update Ansible inventory file with inventory builder
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
 cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

 # Deploy Kubespray with Ansible Playbook - run the playbook as root
-# The option `-b` is required, as for example writing SSL keys in /etc/,
+# The option `--become` is required, as for example writing SSL keys in /etc/,
 # installing packages and interacting with various systemd daemons.
-# Without -b the playbook will fail to run!
-ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root cluster.yml
+# Without --become the playbook will fail to run!
+ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml

 Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
 As a consequence, `ansible-playbook` command will fail with:
@@ -101,6 +101,7 @@ Supported Linux Distributions
 - **Fedora** 28
 - **Fedora/CentOS** Atomic
 - **openSUSE** Leap 42.3/Tumbleweed
+- **Oracle Linux** 7

 Note: Upstart/SysV init based OS types are not supported.

@@ -108,37 +109,33 @@ Supported Components
 --------------------

 - Core
-    - [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.5
-    - [etcd](https://github.com/coreos/etcd) v3.2.26
+    - [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
+    - [etcd](https://github.com/coreos/etcd) v3.3.10
     - [docker](https://www.docker.com/) v18.06 (see note)
-    - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
     - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
 - Network Plugin
-    - [calico](https://github.com/projectcalico/calico) v3.4.0
+    - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
+    - [calico](https://github.com/projectcalico/calico) v3.7.3
     - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-    - [cilium](https://github.com/cilium/cilium) v1.3.0
+    - [cilium](https://github.com/cilium/cilium) v1.5.5
     - [contiv](https://github.com/contiv/install) v1.2.1
     - [flanneld](https://github.com/coreos/flannel) v0.11.0
     - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
-    - [multus](https://github.com/intel/multus-cni) v3.1.autoconf
+    - [multus](https://github.com/intel/multus-cni) v3.2.1
-    - [weave](https://github.com/weaveworks/weave) v2.5.1
+    - [weave](https://github.com/weaveworks/weave) v2.5.2
 - Application
     - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
+    - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
     - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
-    - [coredns](https://github.com/coredns/coredns) v1.4.0
+    - [coredns](https://github.com/coredns/coredns) v1.6.0
-    - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0
+    - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1

 Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

-Note 2: rkt support as docker alternative is limited to control plane (etcd and
-kubelet). Docker is still used for Kubernetes cluster workloads and network
-plugins' related OS services. Also note, only one of the supported network
-plugins can be deployed for a given single cluster.

 Requirements
 ------------
+- **Minimum required version of Kubernetes is v1.14**
-- **Ansible v2.7.6 (or newer) and python-netaddr is installed on the machine
+- **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
   that will run Ansible commands**
 - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -150,7 +147,7 @@ Requirements
   should be configured in the target servers. Then the `ansible_become` flag
   or command parameters `--become or -b` should be specified.

 Hardware:
 These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

 - Master
@@ -161,7 +158,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
 Network Plugins
 ---------------

-You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
+You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

@@ -175,13 +172,17 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
   apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.

 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
-  (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
+  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

+- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.

 - [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
   simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
   iptables for network policies, and BGP for pods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
   It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

+- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network.

 - [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

 The choice is defined with the variable `kube_network_plugin`. There is also an
@@ -205,7 +206,7 @@ Tools and projects on top of Kubespray
 CI Tests
 --------

-[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
+[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

 CI/end-to-end tests sponsored by Google (GCE)
 See the [test matrix](docs/test_cases.md) for details.
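The new Ansible pin in the README (v2.7.8 or newer, but not 2.8.x) can be satisfied with a constrained pip install; a sketch, where the exact pin expression is illustrative rather than mandated by the repo.

```sh
# Install an Ansible in the supported window (>= 2.7.8, below 2.8),
# plus the netaddr and Jinja requirements named in the README
pip install 'ansible>=2.7.8,<2.8' netaddr 'jinja2>=2.9'
ansible --version   # should report a 2.7.x release
```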
18  Vagrantfile  vendored
@@ -21,10 +21,11 @@ SUPPORTED_OS = {
   "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
   "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
   "centos" => {box: "centos/7", user: "vagrant"},
-  "centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
+  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
   "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
   "opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
+  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
 }

 # Defaults for config options defined in CONFIG
@@ -180,11 +181,20 @@ Vagrant.configure("2") do |config|
   "flannel_interface": "eth1",
   "kube_network_plugin": $network_plugin,
   "kube_network_plugin_multus": $multi_networking,
-  "docker_keepcache": "1",
-  "download_run_once": "False",
+  "download_run_once": "True",
   "download_localhost": "False",
+  "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
+  # Make kubespray cache even when download_run_once is false
+  "download_force_cache": "True",
+  # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
+  "download_keep_remote_cache": "False",
+  "docker_keepcache": "1",
+  # These two settings will put kubectl and admin.config in $inventory/artifacts
+  "kubeconfig_localhost": "True",
+  "kubectl_localhost": "True",
   "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
-  "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}"
+  "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
+  "ansible_ssh_user": SUPPORTED_OS[$os][:user]
 }

 # Only execute the Ansible provisioner once, when all the machines are up and ready.
@@ -4,6 +4,8 @@ ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
 #control_path = ~/.ssh/ansible-%%r@%%h:%%p
 [defaults]
 strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
+# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
+force_valid_group_names = ignore

 host_key_checking=False
 gathering = smart
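The new `force_valid_group_names = ignore` setting matters because Kubespray's group names (`k8s-cluster`, `kube-master`, ...) contain dashes, which newer Ansible releases warn about or reject. A minimal sketch of verifying the behaviour, using a throwaway inventory:

```sh
# With force_valid_group_names = ignore in ansible.cfg, an inventory whose
# group names contain dashes parses cleanly
cat > /tmp/hosts.ini <<'EOF'
[kube-master]
node1

[k8s-cluster:children]
kube-master
EOF
ansible-inventory -i /tmp/hosts.ini --graph
```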
66  cluster.yml
@@ -3,11 +3,11 @@
   gather_facts: false
   become: no
   tasks:
-    - name: "Check ansible version >=2.7.6"
+    - name: "Check ansible version >=2.7.8"
       assert:
-        msg: "Ansible must be v2.7.6 or higher"
+        msg: "Ansible must be v2.7.8 or higher"
         that:
-          - ansible_version.string is version("2.7.6", ">=")
+          - ansible_version.string is version("2.7.8", ">=")
       tags:
         - check
   vars:
@@ -19,57 +19,50 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

-- hosts: k8s-cluster:etcd:calico-rr
+- hosts: k8s-cluster:etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
-  vars:
-    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
-    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
-    ansible_ssh_pipelining: false
   roles:
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}

-- hosts: k8s-cluster:etcd:calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  vars:
-    ansible_ssh_pipelining: true
-  gather_facts: false
-  pre_tasks:
-    - name: gather facts from all instances
-      setup:
-      delegate_to: "{{item}}"
-      delegate_facts: true
-      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
-      run_once: true

-- hosts: k8s-cluster:etcd:calico-rr
+- hosts: k8s-cluster:etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: true
+        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
+      when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s-cluster:calico-rr
+- hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: false
+        etcd_events_cluster_setup: false
+      when: not etcd_kubeadm_enabled| default(false)

 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/node, tags: node }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -86,6 +79,12 @@
     - { role: kubernetes/kubeadm, tags: kubeadm}
     - { role: network_plugin, tags: network }

+- hosts: calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}

 - hosts: kube-master[0]
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
@@ -102,16 +101,15 @@
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

-- hosts: calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: network_plugin/calico/rr, tags: network }

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
+  environment: "{{ proxy_env }}"

+- hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
-  environment: "{{proxy_env}}"
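The new `when: not etcd_kubeadm_enabled| default(false)` guard on both etcd plays means Kubespray's own etcd setup can be skipped entirely; a sketch of opting in, assuming kubeadm is then expected to manage etcd for the cluster.

```sh
# Skip Kubespray's etcd roles and let kubeadm own etcd instead
ansible-playbook -i inventory/mycluster/hosts.yml --become \
  -e etcd_kubeadm_enabled=true cluster.yml
```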
@@ -42,8 +42,11 @@ class SearchEC2Tags(object):
     region = os.environ['REGION']

     ec2 = boto3.resource('ec2', region)
-    instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
+    filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
+    cluster_name = os.getenv('CLUSTER_NAME')
+    if cluster_name:
+        filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
+    instances = ec2.instances.filter(Filters=filters)
     for instance in instances:

     ##Suppose default vpc_visibility is private
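The change makes CLUSTER_NAME an optional extra filter: when set, only instances carrying a `kubernetes.io/cluster/<name>` tag are returned. A usage sketch; the script path is an assumption, since the file header for this hunk was lost in extraction.

```sh
# Scope the dynamic inventory to one cluster's tagged instances
# (path assumed to be the AWS inventory script under contrib/)
export REGION=eu-west-1
export CLUSTER_NAME=mycluster
python3 contrib/aws_inventory/kubespray-aws-inventory.py
```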
@@ -4,8 +4,11 @@
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd

-- set_fact:
+- name: Set vm_list
+  set_fact:
     vm_list: "{{ vm_list_cmd.stdout }}"

 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
@@ -8,9 +8,22 @@
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd

-- set_fact:
+- name: Query Azure Load Balancer Public IP
+  command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
+  register: lb_pubip_cmd
+
+- name: Set VM IP, roles lists and load balancer public IP
+  set_fact:
     vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
     vm_roles_list: "{{ vm_list_cmd.stdout }}"
+    lb_pubip: "{{ lb_pubip_cmd.stdout }}"

 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
+
+- name: Generate Load Balancer variables
+  template:
+    src: loadbalancer_vars.j2
+    dest: "{{ playbook_dir }}/loadbalancer_vars.yml"

loadbalancer_vars.j2  Normal file
@@ -0,0 +1,8 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
  address: {{ lb_pubip.ipAddress }}
  port: 6443

## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false
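The rendered `loadbalancer_vars.yml` is presumably meant to be fed back into the deployment as extra vars, so the apiserver endpoint reflects the Azure load balancer. A sketch of that hand-off; the exact invocation is not documented in this diff.

```sh
# Reuse the generated LB variables when running the main playbook
ansible-playbook -i inventory -e @loadbalancer_vars.yml --become cluster.yml
```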
@@ -29,7 +29,7 @@ sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
 imageReference:
   publisher: "OpenLogic"
   offer: "CentOS"
-  sku: "7.2"
+  sku: "7.5"
   version: "latest"
 imageReferenceJson: "{{imageReference|to_json}}"
@@ -1,10 +1,18 @@
 ---
-- set_fact:
-    base_dir: "{{playbook_dir}}/.generated/"
+- name: Set base_dir
+  set_fact:
+    base_dir: "{{ playbook_dir }}/.generated/"

-- file: path={{base_dir}} state=directory recurse=true
+- name: Create base_dir
+  file:
+    path: "{{ base_dir }}"
+    state: directory
+    recurse: true

-- template: src={{item}} dest="{{base_dir}}/{{item}}"
+- name: Store json files in base_dir
+  template:
+    src: "{{ item }}"
+    dest: "{{ base_dir }}/{{ item }}"
   with_items:
     - network.json
     - storage.json
@@ -12,7 +12,7 @@
 - name: Null-ify some linux tools to ease DIND
   file:
     src: "/bin/true"
-    dest: "{{item}}"
+    dest: "{{ item }}"
     state: link
     force: yes
   with_items:
@@ -52,7 +52,7 @@
     - rsyslog
     - "{{ distro_ssh_service }}"

-- name: Create distro user "{{distro_user}}"
+- name: Create distro user "{{ distro_user }}"
   user:
     name: "{{ distro_user }}"
     uid: 1000
@@ -28,7 +28,7 @@
     - /lib/modules:/lib/modules
     - "{{ item }}:/dind/docker"
   register: containers
-  with_items: "{{groups.containers}}"
+  with_items: "{{ groups.containers }}"
   tags:
     - addresses

@@ -79,6 +79,7 @@
   with_items: "{{ containers.results }}"

 - name: Early hack image install to adapt for DIND
+  # noqa 302 - this task uses the raw module intentionally
   raw: |
     rm -fv /usr/bin/udevadm /usr/sbin/udevadm
   delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
@@ -59,6 +59,7 @@ def get_var_as_bool(name, default):


 CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
+KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
 # Reconfigures cluster distribution at scale
 SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
 MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
@@ -93,14 +94,16 @@ class KubesprayInventory(object):
             self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
             self.set_all(self.hosts)
             self.set_k8s_cluster()
-            self.set_etcd(list(self.hosts.keys())[:3])
+            etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
+            self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
             if len(self.hosts) >= SCALE_THRESHOLD:
-                self.set_kube_master(list(self.hosts.keys())[3:5])
+                self.set_kube_master(list(self.hosts.keys())[
+                    etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
             else:
-                self.set_kube_master(list(self.hosts.keys())[:2])
+                self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
             self.set_kube_node(self.hosts.keys())
             if len(self.hosts) >= SCALE_THRESHOLD:
-                self.set_calico_rr(list(self.hosts.keys())[:3])
+                self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
             else:  # Show help if no options
                 self.show_help()
                 sys.exit(0)
@@ -202,11 +205,11 @@ class KubesprayInventory(object):
         try:
             # Python 3.x
             start = int(ip_address(start_address))
             end = int(ip_address(end_address))
         except:
             # Python 2.7
             start = int(ip_address(unicode(start_address)))
             end = int(ip_address(unicode(end_address)))
         return [ip_address(ip).exploded for ip in range(start, end + 1)]

         for host in hosts:
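With this change the master count is configurable from the environment; note the variable the code actually reads is the oddly doubled `KUBE_MASTERS_MASTERS`, and with fewer than three hosts the etcd group falls back to a single member. A usage sketch, reusing the IPs from the README example:

```sh
# Generate an inventory with three masters instead of the default two
export KUBE_MASTERS_MASTERS=3
CONFIG_FILE=inventory/mycluster/hosts.yml \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
```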
@@ -1,15 +1,9 @@
 ---
-- name: Upgrade all packages to the latest version (yum)
-  yum:
-    name: '*'
-    state: latest
-  when: ansible_os_family == "RedHat"
-
 - name: Install required packages
   yum:
     name: "{{ item }}"
-    state: latest
+    state: present
   with_items:
     - bind-utils
     - ntp
@@ -21,23 +15,13 @@
     update_cache: yes
     cache_valid_time: 3600
     name: "{{ item }}"
-    state: latest
+    state: present
     install_recommends: no
   with_items:
     - dnsutils
     - ntp
   when: ansible_os_family == "Debian"
-
-- name: Upgrade all packages to the latest version (apt)
-  shell: apt-get -o \
-         Dpkg::Options::=--force-confdef -o \
-         Dpkg::Options::=--force-confold -q -y \
-         dist-upgrade
-  environment:
-    DEBIAN_FRONTEND: noninteractive
-  when: ansible_os_family == "Debian"

 # Create deployment user if required
 - include: user.yml
   when: k8s_deployment_user is defined
@@ -2,9 +2,11 @@
 ```
 MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
 ```
-This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
+This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.

 ## Install
 ```
+Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/group_vars/k8s-cluster/addons.yml and making any adjustments as required.
+
 ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
 ```
@@ -1,6 +1,12 @@
 ---
 metallb:
   ip_range: "10.5.0.50-10.5.0.99"
+  protocol: "layer2"
+  # additional_address_pools:
+  #   kube_service_pool:
+  #     ip_range: "10.5.1.50-10.5.1.99"
+  #     protocol: "layer2"
+  #     auto_assign: false
   limits:
     cpu: "100m"
     memory: "100Mi"
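Overrides follow the placement described in the MetalLB README above; the sketch below writes an addons file matching the new defaults schema (pool name and IP ranges are the illustrative values from the commented defaults, not requirements).

```sh
# Override the MetalLB defaults from group_vars, enabling a second pool
mkdir -p inventory/mycluster/group_vars/k8s-cluster
cat > inventory/mycluster/group_vars/k8s-cluster/addons.yml <<'EOF'
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  additional_address_pools:
    kube_service_pool:
      ip_range: "10.5.1.50-10.5.1.99"
      protocol: "layer2"
      auto_assign: false
EOF
```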
@@ -9,7 +9,7 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   become: true
@@ -8,6 +8,14 @@ data:
   config: |
     address-pools:
       - name: loadbalanced
-        protocol: layer2
+        protocol: {{ metallb.protocol }}
         addresses:
           - {{ metallb.ip_range }}
+{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
+      - name: {{ pool }}
+        protocol: {{ metallb.additional_address_pools[pool].protocol }}
+        addresses:
+          - {{ metallb.additional_address_pools[pool].ip_range }}
+        auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
+{% endfor %}
+{% endif %}
15  contrib/misc/clusteradmin-rbac.yml  Normal file
@@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
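Applying this manifest binds the `cluster-admin` role to the kubernetes-dashboard service account, which is convenient for labs but very permissive for production clusters; the one-liner below is the obvious way to use it.

```sh
# Grant the dashboard service account cluster-admin (use with care)
kubectl apply -f contrib/misc/clusteradmin-rbac.yml
```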
@@ -1,6 +1,8 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
+  template:
+    src: "{{ item.file }}"
+    dest: "{{ kube_config_dir }}/{{ item.dest }}"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -12,9 +14,9 @@
   kube:
     name: glusterfs
     namespace: default
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.dest}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -14,3 +14,5 @@ ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contr
 ```
 ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
 ```
+
+Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system
@@ -4,6 +4,7 @@
   register: "initial_heketi_state"
   changed_when: false
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"

 - name: "Bootstrap heketi."
   when:
     - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -16,15 +17,20 @@
   register: "initial_heketi_pod"
   command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
   changed_when: false

 - name: "Ensure heketi bootstrap pod is up."
   assert:
     that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"

-- set_fact:
+- name: Store the initial heketi pod name
+  set_fact:
     initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"

 - name: "Test heketi topology."
   changed_when: false
   register: "heketi_topology"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

 - name: "Load heketi topology."
   when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
   include_tasks: "bootstrap/topology.yml"
@@ -42,6 +48,7 @@
   command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
   changed_when: false
   register: "heketi_storage_state"

 # ensure endpoints actually exist before trying to move database data to it
 - name: "Create heketi storage."
   include_tasks: "bootstrap/storage.yml"
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Wait for heketi bootstrap to complete."
@@ -6,7 +6,7 @@
 - name: "Create heketi storage."
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
     state: "present"
   vars:
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Kubernetes Apps | Label GlusterFS nodes"
@@ -33,6 +33,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-service-account.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,11 +1,19 @@
 ---
-- register: "label_present"
+- name: Get storage nodes
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false

 - name: "Assign storage label"
   when: "label_present.stdout_lines|length == 0"
   command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"

-- register: "label_present"
+- name: Get storage nodes again
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false

-- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }
+- name: Ensure the label has been set
+  assert:
+    that: "label_present|length > 0"
+    msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
@@ -1,19 +1,24 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi"
   become: true
-  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
+  template:
+    src: "heketi-deployment.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-deployment.json"
   register: "rendering"

 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"

 - name: "Ensure heketi is up and running."
   changed_when: false
   register: "heketi_state"
   vars:
-    heketi_state: { stdout: "{}" }
+    heketi_state:
+      stdout: "{}"
     pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
@@ -22,5 +27,7 @@
     - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5

-- set_fact:
+- name: Set the Heketi pod name
+  set_fact:
     heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -7,7 +7,7 @@

 - name: "Kubernetes Apps | Test Heketi"
   register: "heketi_service_state"
-  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
   changed_when: false

 - name: "Kubernetes Apps | Bootstrap Heketi"
|||||||
@@ -1,31 +1,44 @@
 ---
-- register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+- name: Get clusterrolebindings
+  register: "clusterrolebinding_state"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
-- register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+
+- name: Get clusterrolebindings again
+  register: "clusterrolebinding_state"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure that clusterrolebindings are present now
+  assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."
 
-- register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+- name: Get the heketi-config-secret secret
+  register: "secret_state"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 
 - name: "Render Heketi secret configuration."
   become: true
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
 
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
-- register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+
+- name: Get the heketi-config-secret secret again
+  register: "secret_state"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
-- assert:
+
+- name: Make sure the heketi-config-secret secret exists now
+  assert:
     that: "secret_state.stdout != \"\""
     msg: "Heketi config secret is not present."
@@ -7,6 +7,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -20,6 +20,6 @@
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/storageclass.yml"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -69,7 +69,7 @@
       },
       "readinessProbe": {
         "timeoutSeconds": 3,
-        "initialDelaySeconds": 60,
+        "initialDelaySeconds": 3,
         "exec": {
           "command": [
             "/bin/bash",
@@ -80,7 +80,7 @@
       },
       "livenessProbe": {
         "timeoutSeconds": 3,
-        "initialDelaySeconds": 60,
+        "initialDelaySeconds": 10,
         "exec": {
           "command": [
             "/bin/bash",
@@ -56,7 +56,7 @@
         "serviceAccountName": "heketi-service-account",
         "containers": [
           {
-            "image": "heketi/heketi:7",
+            "image": "heketi/heketi:9",
             "imagePullPolicy": "Always",
             "name": "deploy-heketi",
             "env": [
@@ -106,7 +106,7 @@
             },
             "livenessProbe": {
               "timeoutSeconds": 3,
-              "initialDelaySeconds": 30,
+              "initialDelaySeconds": 10,
               "httpGet": {
                 "path": "/hello",
                 "port": 8080
@@ -68,7 +68,7 @@
         "serviceAccountName": "heketi-service-account",
         "containers": [
           {
-            "image": "heketi/heketi:7",
+            "image": "heketi/heketi:9",
             "imagePullPolicy": "Always",
             "name": "heketi",
             "env": [
@@ -122,7 +122,7 @@
             },
             "livenessProbe": {
               "timeoutSeconds": 3,
-              "initialDelaySeconds": 30,
+              "initialDelaySeconds": 10,
               "httpGet": {
                 "path": "/hello",
                 "port": 8080
@@ -0,0 +1,2 @@
+---
+heketi_remove_lvm: false
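The default added above means the cleanup play now leaves `lvm2` installed unless an operator opts in. A minimal sketch of opting in from inventory — the group_vars path is illustrative, any scope covering the GlusterFS nodes works:

```
# inventory/mycluster/group_vars/all.yml (illustrative path)
# Allow the Heketi/GlusterFS cleanup play to also remove the lvm2 package:
heketi_remove_lvm: true
```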
@@ -14,6 +14,8 @@
   when: "ansible_os_family == 'Debian'"
 
 - name: "Get volume group information."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
@@ -21,12 +23,16 @@
   changed_when: false
 
 - name: "Remove volume groups."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "vgremove {{ volume_group }} --yes"
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }
 
 - name: "Remove physical volume from cluster disks."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
   ignore_errors: true
@@ -36,11 +42,11 @@
   yum:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'RedHat'"
+  when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
 
 - name: "Remove lvm utils (Debian)"
   become: true
   apt:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'Debian'"
+  when: "ansible_os_family == 'Debian' and heketi_remove_lvm"
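The repeated `environment: PATH` override exists because RH-family hosts keep the LVM tools in `/sbin`, which is often absent from the non-login PATH Ansible inherits. A sketch of the failure mode and the workaround from a plain shell, with `/dev/vdb` standing in for `disk_volume_device_1`:

```ShellSession
$ pvs /dev/vdb --option vg_name            # can fail with "pvs: command not found" if /sbin is not on PATH
$ PATH="$PATH:/sbin" pvs /dev/vdb --option vg_name | tail -n+2
```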
contrib/terraform/aws/sample-inventory/cluster.tf (new file, 53 lines)
@@ -0,0 +1,53 @@
+#Global Vars
+aws_cluster_name = "devtest"
+
+#VPC Vars
+aws_vpc_cidr_block = "10.250.192.0/18"
+
+aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
+
+aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
+
+#Bastion Host
+aws_bastion_size = "t2.medium"
+
+#Kubernetes Cluster
+
+aws_kube_master_num = 3
+
+aws_kube_master_size = "t2.medium"
+
+aws_etcd_num = 3
+
+aws_etcd_size = "t2.medium"
+
+aws_kube_worker_num = 4
+
+aws_kube_worker_size = "t2.medium"
+
+#Settings AWS ELB
+
+aws_elb_api_port = 6443
+
+k8s_secure_api_port = 6443
+
+kube_insecure_apiserver_address = "0.0.0.0"
+
+default_tags = {
+  # Env = "devtest" # Product = "kubernetes"
+}
+
+inventory_file = "../../../inventory/hosts"
+
+## Credentials
+#AWS Access Key
+AWS_ACCESS_KEY_ID = ""
+
+#AWS Secret Key
+AWS_SECRET_ACCESS_KEY = ""
+
+#EC2 SSH Key Name
+AWS_SSH_KEY_NAME = ""
+
+#AWS Region
+AWS_DEFAULT_REGION = "eu-central-1"
contrib/terraform/aws/sample-inventory/group_vars (new symbolic link)
@@ -0,0 +1 @@
+../../../../inventory/sample/group_vars
contrib/terraform/openstack/.gitignore (vendored)
@@ -1,4 +1,5 @@
 .terraform
 *.tfvars
+!sample-inventory\/cluster.tfvars
 *.tfstate
 *.tfstate.backup
@@ -16,14 +16,13 @@ most modern installs of OpenStack that support the basic services.
 - [ELASTX](https://elastx.se/)
 - [EnterCloudSuite](https://www.entercloudsuite.com/)
 - [FugaCloud](https://fuga.cloud/)
+- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
 - [OVH](https://www.ovh.com/)
 - [Rackspace](https://www.rackspace.com/)
 - [Ultimum](https://ultimum.io/)
 - [VexxHost](https://vexxhost.com/)
 - [Zetta](https://www.zetta.io/)
 
-### Known incompatible public clouds
-- T-Systems / Open Telekom Cloud: requires `wait_until_associated`
-
 ## Approach
 The terraform configuration inspects variables found in
@@ -70,7 +69,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
 
 ## Requirements
 
-- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
+- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
 - [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
 - you already have a suitable OS image in Glance
 - you already have a floating IP pool created
@@ -220,7 +219,7 @@ set OS_PROJECT_DOMAIN_NAME=Default
 The construction of the cluster is driven by values found in
 [variables.tf](variables.tf).
 
-For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
+For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 
 |Variable | Description |
 |---------|-------------|
@@ -243,7 +242,10 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
 |`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
+|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
+|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
+|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
 
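Together the new rows let `cluster.tfvars` control API, SSH, and floating-IP behavior. A sketch with illustrative CIDRs, not recommendations:

```
# inventory/$CLUSTER/cluster.tfvars (illustrative values)
master_allowed_remote_ips = ["203.0.113.0/24"]   # who may reach the apiserver on 6443
k8s_allowed_remote_ips    = ["203.0.113.0/24"]   # who may SSH to the hosts
wait_for_floatingip       = "true"               # e.g. required on Open Telekom Cloud
```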
 #### Terraform state files
 
@@ -274,7 +276,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
 You can apply the Terraform configuration to your cluster with the following command
 issued from your cluster's inventory directory (`inventory/$CLUSTER`):
 ```ShellSession
-$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
+$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```
 
 if you chose to create a bastion host, this script will create
@@ -288,7 +290,7 @@ pick it up automatically.
 You can destroy your new cluster with the following command issued from the cluster's inventory directory:
 
 ```ShellSession
-$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
+$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```
 
 If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -323,6 +325,30 @@ $ ssh-add ~/.ssh/id_rsa
 
 If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
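`ssh-keygen -R` removes the stale entries one address at a time without editing the file by hand; the IP below stands in for whichever floating IPs the previous cluster used:

```ShellSession
$ ssh-keygen -R 198.51.100.10
```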
 
+#### Metadata variables
+
+The [python script](../terraform.py) that reads the
+generated`.tfstate` file to generate a dynamic inventory recognizes
+some variables within a "metadata" block, defined in a "resource"
+block (example):
+
+```
+resource "openstack_compute_instance_v2" "example" {
+    ...
+    metadata {
+        ssh_user = "ubuntu"
+        prefer_ipv6 = true
+        python_bin = "/usr/bin/python3"
+    }
+    ...
+}
+```
+
+As the example shows, these let you define the SSH username for
+Ansible, a Python binary which is needed by Ansible if
+`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
+instance should be preferred over IPv4.
+
 #### Bastion host
 
 Bastion access will be determined by:
@@ -389,6 +415,11 @@ kube_network_plugin: flannel
 # For Container Linux by CoreOS:
 resolvconf_mode: host_resolvconf
 ```
+- Set max amount of attached cinder volume per host (default 256)
+```
+node_volume_attach_limit: 26
+```
+
 
 ### Deploy Kubernetes
 
@@ -1,5 +1,9 @@
+provider "openstack" {
+  version = "~> 1.17"
+}
+
 module "network" {
-  source = "modules/network"
+  source = "./modules/network"
 
   external_net = "${var.external_net}"
   network_name = "${var.network_name}"
@@ -10,7 +14,7 @@ module "network" {
 }
 
 module "ips" {
-  source = "modules/ips"
+  source = "./modules/ips"
 
   number_of_k8s_masters         = "${var.number_of_k8s_masters}"
   number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
@@ -23,7 +27,7 @@ module "ips" {
 }
 
 module "compute" {
-  source = "modules/compute"
+  source = "./modules/compute"
 
   cluster_name = "${var.cluster_name}"
   az_list      = "${var.az_list}"
@@ -49,12 +53,17 @@ module "compute" {
   network_name                = "${var.network_name}"
   flavor_bastion              = "${var.flavor_bastion}"
   k8s_master_fips             = "${module.ips.k8s_master_fips}"
+  k8s_master_no_etcd_fips     = "${module.ips.k8s_master_no_etcd_fips}"
   k8s_node_fips               = "${module.ips.k8s_node_fips}"
   bastion_fips                = "${module.ips.bastion_fips}"
   bastion_allowed_remote_ips  = "${var.bastion_allowed_remote_ips}"
+  master_allowed_remote_ips   = "${var.master_allowed_remote_ips}"
+  k8s_allowed_remote_ips      = "${var.k8s_allowed_remote_ips}"
+  k8s_allowed_egress_ips      = "${var.k8s_allowed_egress_ips}"
   supplementary_master_groups = "${var.supplementary_master_groups}"
   supplementary_node_groups   = "${var.supplementary_node_groups}"
   worker_allowed_ports        = "${var.worker_allowed_ports}"
+  wait_for_floatingip         = "${var.wait_for_floatingip}"
 
   network_id = "${module.network.router_id}"
 }
@@ -72,7 +81,7 @@ output "router_id" {
 }
 
 output "k8s_master_fips" {
-  value = "${module.ips.k8s_master_fips}"
+  value = "${concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)}"
 }
 
 output "k8s_node_fips" {
@@ -4,40 +4,44 @@ resource "openstack_compute_keypair_v2" "k8s" {
 }
 
 resource "openstack_networking_secgroup_v2" "k8s_master" {
   name        = "${var.cluster_name}-k8s-master"
   description = "${var.cluster_name} - Kubernetes Master"
+  delete_default_rules = true
 }
 
 resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
+  count             = "${length(var.master_allowed_remote_ips)}"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = "6443"
   port_range_max    = "6443"
-  remote_ip_prefix  = "0.0.0.0/0"
+  remote_ip_prefix  = "${var.master_allowed_remote_ips[count.index]}"
   security_group_id = "${openstack_networking_secgroup_v2.k8s_master.id}"
 }
 
 resource "openstack_networking_secgroup_v2" "bastion" {
   name        = "${var.cluster_name}-bastion"
-  count       = "${var.number_of_bastions ? 1 : 0}"
+  count       = "${var.number_of_bastions != "" ? 1 : 0}"
   description = "${var.cluster_name} - Bastion Server"
+  delete_default_rules = true
 }
 
 resource "openstack_networking_secgroup_rule_v2" "bastion" {
-  count             = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
+  count             = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = "22"
   port_range_max    = "22"
   remote_ip_prefix  = "${var.bastion_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
+  security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
 }
 
 resource "openstack_networking_secgroup_v2" "k8s" {
   name        = "${var.cluster_name}-k8s"
   description = "${var.cluster_name} - Kubernetes"
+  delete_default_rules = true
 }
 
 resource "openstack_networking_secgroup_rule_v2" "k8s" {
@@ -47,9 +51,29 @@ resource "openstack_networking_secgroup_rule_v2" "k8s" {
   security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
 }
 
+resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" {
+  count             = "${length(var.k8s_allowed_remote_ips)}"
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = "tcp"
+  port_range_min    = "22"
+  port_range_max    = "22"
+  remote_ip_prefix  = "${var.k8s_allowed_remote_ips[count.index]}"
+  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
+}
+
+resource "openstack_networking_secgroup_rule_v2" "egress" {
+  count             = "${length(var.k8s_allowed_egress_ips)}"
+  direction         = "egress"
+  ethertype         = "IPv4"
+  remote_ip_prefix  = "${var.k8s_allowed_egress_ips[count.index]}"
+  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
+}
+
 resource "openstack_networking_secgroup_v2" "worker" {
   name        = "${var.cluster_name}-k8s-worker"
   description = "${var.cluster_name} - Kubernetes worker nodes"
+  delete_default_rules = true
 }
 
 resource "openstack_networking_secgroup_rule_v2" "worker" {
@@ -75,8 +99,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "${openstack_networking_secgroup_v2.bastion.name}",
-    "default",
+    "${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
   ]
 
   metadata = {
@@ -86,7 +109,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
   }
 }
 
@@ -102,22 +125,18 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     name = "${var.network_name}"
   }
 
-  # The join() hack is described here: https://github.com/hashicorp/terraform/issues/11566
-  # As a workaround for creating "dynamic" lists (when, for example, no bastion host is created)
-
-  security_groups = ["${compact(list(
-    openstack_networking_secgroup_v2.k8s_master.name,
-    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
-    openstack_networking_secgroup_v2.k8s.name,
-    "default",
-  ))}"]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
+  ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = "${var.network_id}"
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
   }
 }
 
@@ -133,11 +152,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${compact(list(
-    openstack_networking_secgroup_v2.k8s_master.name,
-    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
-    openstack_networking_secgroup_v2.k8s.name,
-  ))}"]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
+  ]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -146,7 +163,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
   }
 }
 
@@ -185,7 +202,6 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
-    "default",
   ]
 
   metadata = {
@@ -230,12 +246,9 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${compact(list(
-    openstack_networking_secgroup_v2.k8s_master.name,
-    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
-    openstack_networking_secgroup_v2.k8s.name,
-    "default",
-  ))}"]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+    "${openstack_networking_secgroup_v2.worker.name}",
+  ]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -244,7 +257,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
   }
 }
 
@@ -262,7 +275,6 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
 
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
-    "default",
   ]
 
   metadata = {
@@ -273,21 +285,30 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
 }
 
 resource "openstack_compute_floatingip_associate_v2" "bastion" {
   count       = "${var.number_of_bastions}"
   floating_ip = "${var.bastion_fips[count.index]}"
   instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
+  wait_until_associated = "${var.wait_for_floatingip}"
 }
 
 resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
   count       = "${var.number_of_k8s_masters}"
   instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
   floating_ip = "${var.k8s_master_fips[count.index]}"
+  wait_until_associated = "${var.wait_for_floatingip}"
+}
+
+resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
+  count       = "${var.number_of_k8s_masters_no_etcd}"
+  instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
+  floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
 }
 
 resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
   count       = "${var.number_of_k8s_nodes}"
   floating_ip = "${var.k8s_node_fips[count.index]}"
   instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
+  wait_until_associated = "${var.wait_for_floatingip}"
 }
 
 resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
@@ -309,9 +330,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
-    "default",
-  ]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
 
   metadata = {
     ssh_user = "${var.ssh_user_gfs}"
@@ -54,6 +54,10 @@ variable "k8s_master_fips" {
   type = "list"
 }
 
+variable "k8s_master_no_etcd_fips" {
+  type = "list"
+}
+
 variable "k8s_node_fips" {
   type = "list"
 }
@@ -66,6 +70,20 @@ variable "bastion_allowed_remote_ips" {
   type = "list"
 }
 
+variable "master_allowed_remote_ips" {
+  type = "list"
+}
+
+variable "k8s_allowed_remote_ips" {
+  type = "list"
+}
+
+variable "k8s_allowed_egress_ips" {
+  type = "list"
+}
+
+variable "wait_for_floatingip" {}
+
 variable "supplementary_master_groups" {
   default = ""
 }
@@ -1,5 +1,5 @@
 resource "null_resource" "dummy_dependency" {
-  triggers {
+  triggers = {
     dependency_id = "${var.router_id}"
   }
 }
@@ -10,6 +10,12 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
   depends_on = ["null_resource.dummy_dependency"]
 }
 
+resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
+  count      = "${var.number_of_k8s_masters_no_etcd}"
+  pool       = "${var.floatingip_pool}"
+  depends_on = ["null_resource.dummy_dependency"]
+}
+
 resource "openstack_networking_floatingip_v2" "k8s_node" {
   count = "${var.number_of_k8s_nodes}"
   pool  = "${var.floatingip_pool}"
@@ -1,11 +1,15 @@
 output "k8s_master_fips" {
-  value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
+  value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
+}
+
+output "k8s_master_no_etcd_fips" {
+  value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
 }
 
 output "k8s_node_fips" {
-  value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
+  value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
 }
 
 output "bastion_fips" {
-  value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
+  value = "${openstack_networking_floatingip_v2.bastion[*].address}"
 }
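The dropped square brackets track Terraform 0.12's splat semantics: `resource[*].attr` already evaluates to a list, so keeping the old `["${....*.address}"]` wrapper would nest that list inside another. A minimal sketch of the 0.12 form, reusing the resource address from the outputs above:

```
output "k8s_master_fips" {
  value = openstack_networking_floatingip_v2.k8s_master[*].address  # already list(string); no extra brackets
}
```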
@@ -14,7 +14,7 @@ resource "openstack_networking_network_v2" "k8s" {
 resource "openstack_networking_subnet_v2" "k8s" {
   name            = "${var.cluster_name}-internal-network"
   count           = "${var.use_neutron}"
-  network_id      = "${openstack_networking_network_v2.k8s.id}"
+  network_id      = "${openstack_networking_network_v2.k8s[count.index].id}"
   cidr            = "${var.subnet_cidr}"
   ip_version      = 4
   dns_nameservers = "${var.dns_nameservers}"
@@ -22,6 +22,6 @@ resource "openstack_networking_subnet_v2" "k8s" {
 
 resource "openstack_networking_router_interface_v2" "k8s" {
   count     = "${var.use_neutron}"
-  router_id = "${openstack_networking_router_v2.k8s.id}"
-  subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
+  router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
+  subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
 }
@@ -125,6 +125,11 @@ variable "floatingip_pool" {
   default = "external"
 }
 
+variable "wait_for_floatingip" {
+  description = "Terraform will poll the instance until the floating IP has been associated."
+  default     = "false"
+}
+
 variable "external_net" {
   description = "uuid of the external/public network"
 }
@@ -145,6 +150,24 @@ variable "bastion_allowed_remote_ips" {
   default = ["0.0.0.0/0"]
 }
 
+variable "master_allowed_remote_ips" {
+  description = "An array of CIDRs allowed to access API of masters"
+  type        = "list"
+  default     = ["0.0.0.0/0"]
+}
+
+variable "k8s_allowed_remote_ips" {
+  description = "An array of CIDRs allowed to SSH to hosts"
+  type        = "list"
+  default     = []
+}
+
+variable "k8s_allowed_egress_ips" {
+  description = "An array of CIDRs allowed for egress traffic"
+  type        = "list"
+  default     = ["0.0.0.0/0"]
+}
+
 variable "worker_allowed_ports" {
   type = "list"
 
@@ -1,5 +1,7 @@
 # Configure the Packet Provider
-provider "packet" {}
+provider "packet" {
+  version = "~> 2.0"
+}
 
 resource "packet_ssh_key" "k8s" {
   count = "${var.public_key_path != "" ? 1 : 0}"
@@ -13,7 +15,7 @@ resource "packet_device" "k8s_master" {
   count            = "${var.number_of_k8s_masters}"
   hostname         = "${var.cluster_name}-k8s-master-${count.index+1}"
   plan             = "${var.plan_k8s_masters}"
-  facility         = "${var.facility}"
+  facilities       = ["${var.facility}"]
   operating_system = "${var.operating_system}"
   billing_cycle    = "${var.billing_cycle}"
   project_id       = "${var.packet_project_id}"
@@ -26,7 +28,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   count            = "${var.number_of_k8s_masters_no_etcd}"
   hostname         = "${var.cluster_name}-k8s-master-${count.index+1}"
   plan             = "${var.plan_k8s_masters_no_etcd}"
-  facility         = "${var.facility}"
+  facilities       = ["${var.facility}"]
   operating_system = "${var.operating_system}"
   billing_cycle    = "${var.billing_cycle}"
   project_id       = "${var.packet_project_id}"
@@ -39,7 +41,7 @@ resource "packet_device" "k8s_etcd" {
   count            = "${var.number_of_etcd}"
   hostname         = "${var.cluster_name}-etcd-${count.index+1}"
   plan             = "${var.plan_etcd}"
-  facility         = "${var.facility}"
+  facilities       = ["${var.facility}"]
   operating_system = "${var.operating_system}"
   billing_cycle    = "${var.billing_cycle}"
   project_id       = "${var.packet_project_id}"
@@ -52,7 +54,7 @@ resource "packet_device" "k8s_node" {
   count            = "${var.number_of_k8s_nodes}"
   hostname         = "${var.cluster_name}-k8s-node-${count.index+1}"
   plan             = "${var.plan_k8s_nodes}"
-  facility         = "${var.facility}"
+  facilities       = ["${var.facility}"]
   operating_system = "${var.operating_system}"
   billing_cycle    = "${var.billing_cycle}"
   project_id       = "${var.packet_project_id}"
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # Copyright 2015 Cisco Systems, Inc.
 #
@@ -20,15 +20,15 @@
 Dynamic inventory for Terraform - finds all `.tfstate` files below the working
 directory and generates an inventory based on them.
 """
-from __future__ import unicode_literals, print_function
 import argparse
 from collections import defaultdict
+import random
 from functools import wraps
 import json
 import os
 import re
 
-VERSION = '0.3.0pre'
+VERSION = '0.4.0pre'
 
 
 def tfstates(root=None):
@@ -38,15 +38,58 @@ def tfstates(root=None):
             if os.path.splitext(name)[-1] == '.tfstate':
                 yield os.path.join(dirpath, name)
 
+def convert_to_v3_structure(attributes, prefix=''):
+    """ Convert the attributes from v4 to v3
+        Receives a dict and return a dictionary """
+    result = {}
+    if isinstance(attributes, str):
+        # In the case when we receive a string (e.g. values for security_groups)
+        return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes}
+    for key, value in attributes.items():
+        if isinstance(value, list):
+            if len(value):
+                result['{}{}.#'.format(prefix, key, hash)] = len(value)
+            for i, v in enumerate(value):
+                result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
+        elif isinstance(value, dict):
+            result['{}{}.%'.format(prefix, key)] = len(value)
+            for k, v in value.items():
+                result['{}{}.{}'.format(prefix, key, k)] = v
+        else:
+            result['{}{}'.format(prefix, key)] = value
+    return result
+
 def iterresources(filenames):
     for filename in filenames:
         with open(filename, 'r') as json_file:
             state = json.load(json_file)
-            for module in state['modules']:
-                name = module['path'][-1]
-                for key, resource in module['resources'].items():
-                    yield name, key, resource
+            tf_version = state['version']
+            if tf_version == 3:
+                for module in state['modules']:
+                    name = module['path'][-1]
+                    for key, resource in module['resources'].items():
+                        yield name, key, resource
+            elif tf_version == 4:
+                # In version 4 the structure changes so we need to iterate
+                # each instance inside the resource branch.
+                for resource in state['resources']:
+                    name = resource['module'].split('.')[-1]
+                    for instance in resource['instances']:
+                        key = "{}.{}".format(resource['type'], resource['name'])
+                        if 'index_key' in instance:
+                            key = "{}.{}".format(key, instance['index_key'])
+                        data = {}
+                        data['type'] = resource['type']
+                        data['provider'] = resource['provider']
+                        data['depends_on'] = instance.get('depends_on', [])
+                        data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
+                        if 'id' in instance['attributes']:
+                            data['primary']['id'] = instance['attributes']['id']
+                        data['primary']['meta'] = instance['attributes'].get('meta',{})
+                        yield name, key, data
+            else:
+                raise KeyError('tfstate version %d not supported' % tf_version)
 
 
 ## READ RESOURCES
 PARSERS = {}
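A quick illustration of the v4-to-v3 flattening done by `convert_to_v3_structure`, assuming the function is used exactly as defined above (bare string inputs get a randomized key suffix, so only dict and list inputs are shown):

```python
# Illustrative only: dicts flatten to key.% / key.<name>, lists to key.# / key.<index>.
attrs = {'tags': {'role': 'master'}, 'networks': [{'name': 'k8s'}]}
print(convert_to_v3_structure(attrs))
# {'tags.%': 1, 'tags.role': 'master', 'networks.#': 1, 'networks.0.name': 'k8s'}
```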
@@ -109,7 +152,7 @@ def calculate_mantl_vars(func):
 
 
 def _parse_prefix(source, prefix, sep='.'):
-    for compkey, value in source.items():
+    for compkey, value in list(source.items()):
         try:
             curprefix, rest = compkey.split(sep, 1)
         except ValueError:
@@ -127,7 +170,7 @@ def parse_attr_list(source, prefix, sep='.'):
         idx, key = compkey.split(sep, 1)
         attrs[idx][key] = value
 
-    return attrs.values()
+    return list(attrs.values())
 
 
 def parse_dict(source, prefix, sep='.'):
@@ -149,75 +192,6 @@ def parse_bool(string_form):
         raise ValueError('could not convert %r to a bool' % string_form)
 
 
-@parses('triton_machine')
-@calculate_mantl_vars
-def triton_machine(resource, module_name):
-    raw_attrs = resource['primary']['attributes']
-    name = raw_attrs.get('name')
-    groups = []
-
-    attrs = {
-        'id': raw_attrs['id'],
-        'dataset': raw_attrs['dataset'],
-        'disk': raw_attrs['disk'],
-        'firewall_enabled': parse_bool(raw_attrs['firewall_enabled']),
-        'image': raw_attrs['image'],
-        'ips': parse_list(raw_attrs, 'ips'),
-        'memory': raw_attrs['memory'],
-        'name': raw_attrs['name'],
-        'networks': parse_list(raw_attrs, 'networks'),
-        'package': raw_attrs['package'],
-        'primary_ip': raw_attrs['primaryip'],
-        'root_authorized_keys': raw_attrs['root_authorized_keys'],
-        'state': raw_attrs['state'],
-        'tags': parse_dict(raw_attrs, 'tags'),
-        'type': raw_attrs['type'],
-        'user_data': raw_attrs['user_data'],
-        'user_script': raw_attrs['user_script'],
-
-        # ansible
-        'ansible_ssh_host': raw_attrs['primaryip'],
-        'ansible_ssh_port': 22,
-        'ansible_ssh_user': 'root',  # it's "root" on Triton by default
-
-        # generic
-        'public_ipv4': raw_attrs['primaryip'],
-        'provider': 'triton',
-    }
-
-    # private IPv4
-    for ip in attrs['ips']:
-        if ip.startswith('10') or ip.startswith('192.168'):  # private IPs
-            attrs['private_ipv4'] = ip
-            break
-
-    if 'private_ipv4' not in attrs:
-        attrs['private_ipv4'] = attrs['public_ipv4']
-
-    # attrs specific to Mantl
-    attrs.update({
-        'consul_dc': _clean_dc(attrs['tags'].get('dc', 'none')),
-        'role': attrs['tags'].get('role', 'none'),
-        'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
-    })
-
-    # add groups based on attrs
-    groups.append('triton_image=' + attrs['image'])
-    groups.append('triton_package=' + attrs['package'])
-    groups.append('triton_state=' + attrs['state'])
-    groups.append('triton_firewall_enabled=%s' % attrs['firewall_enabled'])
-    groups.extend('triton_tags_%s=%s' % item
-                  for item in attrs['tags'].items())
-    groups.extend('triton_network=' + network
-                  for network in attrs['networks'])
-
-    # groups specific to Mantl
-    groups.append('role=' + attrs['role'])
-    groups.append('dc=' + attrs['consul_dc'])
-
-    return name, attrs, groups
-
-
 @parses('packet_device')
 def packet_device(resource, tfvars=None):
     raw_attrs = resource['primary']['attributes']
@@ -226,7 +200,7 @@ def packet_device(resource, tfvars=None):
 
     attrs = {
         'id': raw_attrs['id'],
-        'facility': raw_attrs['facility'],
+        'facilities': parse_list(raw_attrs, 'facilities'),
        'hostname': raw_attrs['hostname'],
         'operating_system': raw_attrs['operating_system'],
         'locked': parse_bool(raw_attrs['locked']),
@@ -247,7 +221,6 @@ def packet_device(resource, tfvars=None):
     }
 
     # add groups based on attrs
-    groups.append('packet_facility=' + attrs['facility'])
     groups.append('packet_operating_system=' + attrs['operating_system'])
     groups.append('packet_locked=%s' % attrs['locked'])
     groups.append('packet_state=' + attrs['state'])
@@ -259,94 +232,6 @@ def packet_device(resource, tfvars=None):
     return name, attrs, groups
 
 
-@parses('digitalocean_droplet')
-@calculate_mantl_vars
-def digitalocean_host(resource, tfvars=None):
-    raw_attrs = resource['primary']['attributes']
-    name = raw_attrs['name']
-    groups = []
-
-    attrs = {
-        'id': raw_attrs['id'],
-        'image': raw_attrs['image'],
-        'ipv4_address': raw_attrs['ipv4_address'],
-        'locked': parse_bool(raw_attrs['locked']),
-        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
-        'region': raw_attrs['region'],
-        'size': raw_attrs['size'],
-        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
-        'status': raw_attrs['status'],
-        # ansible
-        'ansible_ssh_host': raw_attrs['ipv4_address'],
-        'ansible_ssh_port': 22,
-        'ansible_ssh_user': 'root',  # it's always "root" on DO
-        # generic
-        'public_ipv4': raw_attrs['ipv4_address'],
-        'private_ipv4': raw_attrs.get('ipv4_address_private',
-                                      raw_attrs['ipv4_address']),
-        'provider': 'digitalocean',
-    }
-
-    # attrs specific to Mantl
-    attrs.update({
-        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
-        'role': attrs['metadata'].get('role', 'none'),
-        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
-    })
-
-    # add groups based on attrs
-    groups.append('do_image=' + attrs['image'])
-    groups.append('do_locked=%s' % attrs['locked'])
-    groups.append('do_region=' + attrs['region'])
-    groups.append('do_size=' + attrs['size'])
-    groups.append('do_status=' + attrs['status'])
-    groups.extend('do_metadata_%s=%s' % item
-                  for item in attrs['metadata'].items())
-
-    # groups specific to Mantl
-    groups.append('role=' + attrs['role'])
-    groups.append('dc=' + attrs['consul_dc'])
-
-    return name, attrs, groups
-
-
-@parses('softlayer_virtualserver')
-@calculate_mantl_vars
-def softlayer_host(resource, module_name):
-    raw_attrs = resource['primary']['attributes']
-    name = raw_attrs['name']
-    groups = []
-
-    attrs = {
-        'id': raw_attrs['id'],
-        'image': raw_attrs['image'],
-        'ipv4_address': raw_attrs['ipv4_address'],
-        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
-        'region': raw_attrs['region'],
-        'ram': raw_attrs['ram'],
-        'cpu': raw_attrs['cpu'],
-        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
-        'public_ipv4': raw_attrs['ipv4_address'],
-        'private_ipv4': raw_attrs['ipv4_address_private'],
-        'ansible_ssh_host': raw_attrs['ipv4_address'],
-        'ansible_ssh_port': 22,
-        'ansible_ssh_user': 'root',
-        'provider': 'softlayer',
-    }
-
-    # attrs specific to Mantl
-    attrs.update({
-        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
-        'role': attrs['metadata'].get('role', 'none'),
-        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
-    })
-
-    # groups specific to Mantl
-    groups.append('role=' + attrs['role'])
-    groups.append('dc=' + attrs['consul_dc'])
-
-    return name, attrs, groups
-
-
 def openstack_floating_ips(resource):
     raw_attrs = resource['primary']['attributes']
     attrs = {
@@ -397,10 +282,16 @@ def openstack_host(resource, module_name):
         attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
 
     try:
-        attrs.update({
-            'ansible_ssh_host': raw_attrs['access_ip_v4'],
-            'publicly_routable': True,
-        })
+        if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
+            attrs.update({
+                'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
+                'publicly_routable': True,
+            })
+        else:
+            attrs.update({
+                'ansible_ssh_host': raw_attrs['access_ip_v4'],
+                'publicly_routable': True,
+            })
     except (KeyError, ValueError):
         attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
 
@@ -410,9 +301,9 @@ def openstack_host(resource, module_name):
     if 'metadata.ssh_user' in raw_attrs:
         attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
|
||||||
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
|
if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
|
||||||
device_index = 1
|
device_index = 1
|
||||||
for key, value in raw_attrs.items():
|
for key, value in list(raw_attrs.items()):
|
||||||
match = re.search("^volume.*.device$", key)
|
match = re.search("^volume.*.device$", key)
|
||||||
if match:
|
if match:
|
||||||
attrs['disk_volume_device_'+str(device_index)] = value
|
attrs['disk_volume_device_'+str(device_index)] = value
|
||||||
@@ -430,7 +321,7 @@ def openstack_host(resource, module_name):
|
|||||||
groups.append('os_image=' + attrs['image']['name'])
|
groups.append('os_image=' + attrs['image']['name'])
|
||||||
groups.append('os_flavor=' + attrs['flavor']['name'])
|
groups.append('os_flavor=' + attrs['flavor']['name'])
|
||||||
groups.extend('os_metadata_%s=%s' % item
|
groups.extend('os_metadata_%s=%s' % item
|
||||||
for item in attrs['metadata'].items())
|
for item in list(attrs['metadata'].items()))
|
||||||
groups.append('os_region=' + attrs['region'])
|
groups.append('os_region=' + attrs['region'])
|
||||||
|
|
||||||
# groups specific to Mantl
|
# groups specific to Mantl
|
||||||
@@ -444,281 +335,6 @@ def openstack_host(resource, module_name):
|
|||||||
return name, attrs, groups
|
return name, attrs, groups
|
||||||
|
|
||||||
|
|
||||||
@parses('aws_instance')
|
|
||||||
@calculate_mantl_vars
|
|
||||||
def aws_host(resource, module_name):
|
|
||||||
name = resource['primary']['attributes']['tags.Name']
|
|
||||||
raw_attrs = resource['primary']['attributes']
|
|
||||||
|
|
||||||
groups = []
|
|
||||||
|
|
||||||
attrs = {
|
|
||||||
'ami': raw_attrs['ami'],
|
|
||||||
'availability_zone': raw_attrs['availability_zone'],
|
|
||||||
'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
|
|
||||||
'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
|
|
||||||
'ephemeral_block_device': parse_attr_list(raw_attrs,
|
|
||||||
'ephemeral_block_device'),
|
|
||||||
'id': raw_attrs['id'],
|
|
||||||
'key_name': raw_attrs['key_name'],
|
|
||||||
'private': parse_dict(raw_attrs, 'private',
|
|
||||||
sep='_'),
|
|
||||||
'public': parse_dict(raw_attrs, 'public',
|
|
||||||
sep='_'),
|
|
||||||
'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
|
|
||||||
'security_groups': parse_list(raw_attrs, 'security_groups'),
|
|
||||||
'subnet': parse_dict(raw_attrs, 'subnet',
|
|
||||||
sep='_'),
|
|
||||||
'tags': parse_dict(raw_attrs, 'tags'),
|
|
||||||
'tenancy': raw_attrs['tenancy'],
|
|
||||||
'vpc_security_group_ids': parse_list(raw_attrs,
|
|
||||||
'vpc_security_group_ids'),
|
|
||||||
# ansible-specific
|
|
||||||
'ansible_ssh_port': 22,
|
|
||||||
'ansible_ssh_host': raw_attrs['public_ip'],
|
|
||||||
# generic
|
|
||||||
'public_ipv4': raw_attrs['public_ip'],
|
|
||||||
'private_ipv4': raw_attrs['private_ip'],
|
|
||||||
'provider': 'aws',
|
|
||||||
}
|
|
||||||
|
|
||||||
# attrs specific to Ansible
|
|
||||||
if 'tags.sshUser' in raw_attrs:
|
|
||||||
attrs['ansible_ssh_user'] = raw_attrs['tags.sshUser']
|
|
||||||
if 'tags.sshPrivateIp' in raw_attrs:
|
|
||||||
attrs['ansible_ssh_host'] = raw_attrs['private_ip']
|
|
||||||
|
|
||||||
# attrs specific to Mantl
|
|
||||||
attrs.update({
|
|
||||||
'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
|
|
||||||
'role': attrs['tags'].get('role', 'none'),
|
|
||||||
'ansible_python_interpreter': attrs['tags'].get('python_bin','python')
|
|
||||||
})
|
|
||||||
|
|
||||||
# groups specific to Mantl
|
|
||||||
groups.extend(['aws_ami=' + attrs['ami'],
|
|
||||||
'aws_az=' + attrs['availability_zone'],
|
|
||||||
'aws_key_name=' + attrs['key_name'],
|
|
||||||
'aws_tenancy=' + attrs['tenancy']])
|
|
||||||
groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
|
|
||||||
groups.extend('aws_vpc_security_group=' + group
|
|
||||||
for group in attrs['vpc_security_group_ids'])
|
|
||||||
groups.extend('aws_subnet_%s=%s' % subnet
|
|
||||||
for subnet in attrs['subnet'].items())
|
|
||||||
|
|
||||||
# groups specific to Mantl
|
|
||||||
groups.append('role=' + attrs['role'])
|
|
||||||
groups.append('dc=' + attrs['consul_dc'])
|
|
||||||
|
|
||||||
return name, attrs, groups
|
|
||||||
|
|
||||||
|
|
||||||
@parses('google_compute_instance')
|
|
||||||
@calculate_mantl_vars
|
|
||||||
def gce_host(resource, module_name):
|
|
||||||
name = resource['primary']['id']
|
|
||||||
raw_attrs = resource['primary']['attributes']
|
|
||||||
groups = []
|
|
||||||
|
|
||||||
# network interfaces
|
|
||||||
interfaces = parse_attr_list(raw_attrs, 'network_interface')
|
|
||||||
for interface in interfaces:
|
|
||||||
interface['access_config'] = parse_attr_list(interface,
|
|
||||||
'access_config')
|
|
||||||
for key in interface.keys():
|
|
||||||
if '.' in key:
|
|
||||||
del interface[key]
|
|
||||||
|
|
||||||
# general attrs
|
|
||||||
attrs = {
|
|
||||||
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
|
|
||||||
'disks': parse_attr_list(raw_attrs, 'disk'),
|
|
||||||
'machine_type': raw_attrs['machine_type'],
|
|
||||||
'metadata': parse_dict(raw_attrs, 'metadata'),
|
|
||||||
'network': parse_attr_list(raw_attrs, 'network'),
|
|
||||||
'network_interface': interfaces,
|
|
||||||
'self_link': raw_attrs['self_link'],
|
|
||||||
'service_account': parse_attr_list(raw_attrs, 'service_account'),
|
|
||||||
'tags': parse_list(raw_attrs, 'tags'),
|
|
||||||
'zone': raw_attrs['zone'],
|
|
||||||
# ansible
|
|
||||||
'ansible_ssh_port': 22,
|
|
||||||
'provider': 'gce',
|
|
||||||
}
|
|
||||||
|
|
||||||
# attrs specific to Ansible
|
|
||||||
if 'metadata.ssh_user' in raw_attrs:
|
|
||||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
|
||||||
|
|
||||||
# attrs specific to Mantl
|
|
||||||
attrs.update({
|
|
||||||
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
|
||||||
'role': attrs['metadata'].get('role', 'none'),
|
|
||||||
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
|
|
||||||
})
|
|
||||||
|
|
||||||
try:
|
|
||||||
attrs.update({
|
|
||||||
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
|
|
||||||
'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
|
|
||||||
'private_ipv4': interfaces[0]['address'],
|
|
||||||
'publicly_routable': True,
|
|
||||||
})
|
|
||||||
except (KeyError, ValueError):
|
|
||||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
|
||||||
|
|
||||||
# add groups based on attrs
|
|
||||||
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
|
|
||||||
groups.append('gce_machine_type=' + attrs['machine_type'])
|
|
||||||
groups.extend('gce_metadata_%s=%s' % (key, value)
|
|
||||||
for (key, value) in attrs['metadata'].items()
|
|
||||||
if key not in set(['sshKeys']))
|
|
||||||
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
|
|
||||||
groups.append('gce_zone=' + attrs['zone'])
|
|
||||||
|
|
||||||
if attrs['can_ip_forward']:
|
|
||||||
groups.append('gce_ip_forward')
|
|
||||||
if attrs['publicly_routable']:
|
|
||||||
groups.append('gce_publicly_routable')
|
|
||||||
|
|
||||||
# groups specific to Mantl
|
|
||||||
groups.append('role=' + attrs['metadata'].get('role', 'none'))
|
|
||||||
groups.append('dc=' + attrs['consul_dc'])
|
|
||||||
|
|
||||||
return name, attrs, groups
|
|
||||||
|
|
||||||
|
|
||||||
@parses('vsphere_virtual_machine')
|
|
||||||
@calculate_mantl_vars
|
|
||||||
def vsphere_host(resource, module_name):
|
|
||||||
raw_attrs = resource['primary']['attributes']
|
|
||||||
network_attrs = parse_dict(raw_attrs, 'network_interface')
|
|
||||||
network = parse_dict(network_attrs, '0')
|
|
||||||
ip_address = network.get('ipv4_address', network['ip_address'])
|
|
||||||
name = raw_attrs['name']
|
|
||||||
groups = []
|
|
||||||
|
|
||||||
attrs = {
|
|
||||||
'id': raw_attrs['id'],
|
|
||||||
'ip_address': ip_address,
|
|
||||||
'private_ipv4': ip_address,
|
|
||||||
'public_ipv4': ip_address,
|
|
||||||
'metadata': parse_dict(raw_attrs, 'custom_configuration_parameters'),
|
|
||||||
'ansible_ssh_port': 22,
|
|
||||||
'provider': 'vsphere',
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
attrs.update({
|
|
||||||
'ansible_ssh_host': ip_address,
|
|
||||||
})
|
|
||||||
except (KeyError, ValueError):
|
|
||||||
attrs.update({'ansible_ssh_host': '', })
|
|
||||||
|
|
||||||
attrs.update({
|
|
||||||
'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
|
|
||||||
'role': attrs['metadata'].get('role', 'none'),
|
|
||||||
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
|
|
||||||
})
|
|
||||||
|
|
||||||
# attrs specific to Ansible
|
|
||||||
if 'ssh_user' in attrs['metadata']:
|
|
||||||
attrs['ansible_ssh_user'] = attrs['metadata']['ssh_user']
|
|
||||||
|
|
||||||
groups.append('role=' + attrs['role'])
|
|
||||||
groups.append('dc=' + attrs['consul_dc'])
|
|
||||||
|
|
||||||
return name, attrs, groups
|
|
||||||
|
|
||||||
@parses('azure_instance')
|
|
||||||
@calculate_mantl_vars
|
|
||||||
def azure_host(resource, module_name):
|
|
||||||
name = resource['primary']['attributes']['name']
|
|
||||||
raw_attrs = resource['primary']['attributes']
|
|
||||||
|
|
||||||
groups = []
|
|
||||||
|
|
||||||
attrs = {
|
|
||||||
'automatic_updates': raw_attrs['automatic_updates'],
|
|
||||||
'description': raw_attrs['description'],
|
|
||||||
'hosted_service_name': raw_attrs['hosted_service_name'],
|
|
||||||
'id': raw_attrs['id'],
|
|
||||||
'image': raw_attrs['image'],
|
|
||||||
'ip_address': raw_attrs['ip_address'],
|
|
||||||
'location': raw_attrs['location'],
|
|
||||||
'name': raw_attrs['name'],
|
|
||||||
'reverse_dns': raw_attrs['reverse_dns'],
|
|
||||||
'security_group': raw_attrs['security_group'],
|
|
||||||
'size': raw_attrs['size'],
|
|
||||||
'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
|
|
||||||
'subnet': raw_attrs['subnet'],
|
|
||||||
'username': raw_attrs['username'],
|
|
||||||
'vip_address': raw_attrs['vip_address'],
|
|
||||||
'virtual_network': raw_attrs['virtual_network'],
|
|
||||||
'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
|
|
||||||
# ansible
|
|
||||||
'ansible_ssh_port': 22,
|
|
||||||
'ansible_ssh_user': raw_attrs['username'],
|
|
||||||
'ansible_ssh_host': raw_attrs['vip_address'],
|
|
||||||
}
|
|
||||||
|
|
||||||
# attrs specific to mantl
|
|
||||||
attrs.update({
|
|
||||||
'consul_dc': attrs['location'].lower().replace(" ", "-"),
|
|
||||||
'role': attrs['description']
|
|
||||||
})
|
|
||||||
|
|
||||||
# groups specific to mantl
|
|
||||||
groups.extend(['azure_image=' + attrs['image'],
|
|
||||||
'azure_location=' + attrs['location'].lower().replace(" ", "-"),
|
|
||||||
'azure_username=' + attrs['username'],
|
|
||||||
'azure_security_group=' + attrs['security_group']])
|
|
||||||
|
|
||||||
# groups specific to mantl
|
|
||||||
groups.append('role=' + attrs['role'])
|
|
||||||
groups.append('dc=' + attrs['consul_dc'])
|
|
||||||
|
|
||||||
return name, attrs, groups
|
|
||||||
|
|
||||||
|
|
||||||
@parses('clc_server')
|
|
||||||
@calculate_mantl_vars
|
|
||||||
def clc_server(resource, module_name):
|
|
||||||
raw_attrs = resource['primary']['attributes']
|
|
||||||
name = raw_attrs.get('id')
|
|
||||||
groups = []
|
|
||||||
md = parse_dict(raw_attrs, 'metadata')
|
|
||||||
attrs = {
|
|
||||||
'metadata': md,
|
|
||||||
'ansible_ssh_port': md.get('ssh_port', 22),
|
|
||||||
'ansible_ssh_user': md.get('ssh_user', 'root'),
|
|
||||||
'provider': 'clc',
|
|
||||||
'publicly_routable': False,
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
attrs.update({
|
|
||||||
'public_ipv4': raw_attrs['public_ip_address'],
|
|
||||||
'private_ipv4': raw_attrs['private_ip_address'],
|
|
||||||
'ansible_ssh_host': raw_attrs['public_ip_address'],
|
|
||||||
'publicly_routable': True,
|
|
||||||
})
|
|
||||||
except (KeyError, ValueError):
|
|
||||||
attrs.update({
|
|
||||||
'ansible_ssh_host': raw_attrs['private_ip_address'],
|
|
||||||
'private_ipv4': raw_attrs['private_ip_address'],
|
|
||||||
})
|
|
||||||
|
|
||||||
attrs.update({
|
|
||||||
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
|
||||||
'role': attrs['metadata'].get('role', 'none'),
|
|
||||||
})
|
|
||||||
|
|
||||||
groups.append('role=' + attrs['role'])
|
|
||||||
groups.append('dc=' + attrs['consul_dc'])
|
|
||||||
return name, attrs, groups
|
|
||||||
|
|
||||||
|
|
||||||
def iter_host_ips(hosts, ips):
|
def iter_host_ips(hosts, ips):
|
||||||
'''Update hosts that have an entry in the floating IP list'''
|
'''Update hosts that have an entry in the floating IP list'''
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
@@ -11,7 +11,7 @@
   until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
   retries: 3
   delay: 2
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: true
   failed_when: false
  register: vault_etcd_health_check
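For orientation, a minimal sketch of a complete task built around the same retry pattern; the `uri` endpoint and port shown here are assumptions for illustration, not the vault role's actual values:

```yaml
# Illustrative only: a retried etcd health probe in the same style.
- name: check etcd health before bootstrapping vault
  uri:
    url: "https://{{ groups['etcd'][0] }}:2379/health"  # assumed endpoint/port
    validate_certs: false
  register: vault_etcd_health_check
  until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
  retries: 3
  delay: 2
  delegate_to: "{{ groups['etcd'][0] }}"
  run_once: true
  failed_when: false
```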
@@ -1,7 +1,7 @@
 ---
 # Stop temporary Vault if it's running (can linger if playbook fails out)
 - name: stop vault-temp container
-  shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
+  shell: docker stop {{ vault_temp_container_name }}
   failed_when: false
   register: vault_temp_stop
   changed_when: vault_temp_stop is succeeded
@@ -5,17 +5,19 @@
   set_fact:
     sync_file_dir: "{{ sync_file_path | dirname }}"
     sync_file: "{{ sync_file_path | basename }}"
-  when: sync_file_path is defined and sync_file_path != ''
+  when:
+    - sync_file_path is defined
+    - sync_file_path

 - name: "sync_file | Set fact for sync_file_path when undefined"
   set_fact:
     sync_file_path: "{{ (sync_file_dir, sync_file)|join('/') }}"
-  when: sync_file_path is not defined or sync_file_path == ''
+  when: sync_file_path is not defined or not sync_file_path

 - name: "sync_file | Set fact for key path name"
   set_fact:
     sync_file_key_path: "{{ sync_file_path.rsplit('.', 1)|first + '-key.' + sync_file_path.rsplit('.', 1)|last }}"
-  when: sync_file_key_path is not defined or sync_file_key_path == ''
+  when: sync_file_key_path is not defined or not sync_file_key_path

 - name: "sync_file | Check if {{sync_file_path}} file exists"
   stat:
@@ -46,17 +48,17 @@
 - name: "sync_file | Remove sync sources with files that do not match sync_file_srcs|first"
   set_fact:
     _: "{% if inventory_hostname in sync_file_srcs %}{{ sync_file_srcs.remove(inventory_hostname) }}{% endif %}"
-  when: >-
-    sync_file_srcs|d([])|length > 1 and
-    inventory_hostname != sync_file_srcs|first
+  when:
+    - sync_file_srcs|d([])|length > 1
+    - inventory_hostname != sync_file_srcs|first

 - name: "sync_file | Remove sync sources with keys that do not match sync_file_srcs|first"
   set_fact:
     _: "{% if inventory_hostname in sync_file_srcs %}{{ sync_file_srcs.remove(inventory_hostname) }}{% endif %}"
-  when: >-
-    sync_file_is_cert|d() and
-    sync_file_key_srcs|d([])|length > 1 and
-    inventory_hostname != sync_file_key_srcs|first
+  when:
+    - sync_file_is_cert|d()
+    - sync_file_key_srcs|d([])|length > 1
+    - inventory_hostname != sync_file_key_srcs|first

 - name: "sync_file | Consolidate file and key sources"
   set_fact:
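For reference, list-form `when` conditions are implicitly ANDed, so the rewrites above preserve the original logic while avoiding folded-string YAML pitfalls. A minimal sketch with placeholder variable names:

```yaml
# Equivalent AND of two conditions; variable names are placeholders.
- name: conditional example
  debug:
    msg: "runs only when every listed condition holds"
  when:
    - my_srcs | d([]) | length > 1
    - inventory_hostname != my_srcs | first
```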
@@ -1,45 +0,0 @@
-[Unit]
-Description=hashicorp vault on rkt
-Documentation=https://github.com/hashicorp/vault
-Wants=network.target
-
-[Service]
-User=root
-Restart=on-failure
-RestartSec=10s
-TimeoutStartSec=5
-LimitNOFILE=40000
-# Container has the following internal mount points:
-# /vault/file/ # File backend storage location
-# /vault/logs/ # Log files
-ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
-
-ExecStart=/usr/bin/rkt run \
-        --insecure-options=image \
-        --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
-        --mount volume=hosts,target=/etc/hosts \
-        --volume=volume-vault-file,kind=host,source=/var/lib/vault \
-        --volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
-        --volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
-        --mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
-        --volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
-        --mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
-        --volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
-        --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
-        --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
-        --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
-        --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
-        --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
-        docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
-        --uuid-file-save=/var/run/vault.uuid \
-        --name={{ vault_container_name }} \
-        --net=host \
-        --caps-retain=CAP_IPC_LOCK \
-        --exec vault -- \
-        server \
-        --config={{ vault_config_dir }}/config.json
-
-ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
-
-[Install]
-WantedBy=multi-user.target
@@ -93,6 +93,6 @@ Potential Work
 - Change the Vault role to not run certain tasks when ``root_token`` and
   ``unseal_keys`` are not present. Alternatively, allow user input for these
   values when missing.
-- Add the ability to start temp Vault with Host, Rkt, or Docker
+- Add the ability to start temp Vault with Host or Docker
 - Add a dynamic way to change out the backend role creation during Bootstrap,
   so other services can be used (such as Consul)
@@ -20,6 +20,7 @@
 * [AWS](docs/aws.md)
 * [Azure](docs/azure.md)
 * [OpenStack](/docs/openstack.md)
+* [Packet](/docs/packet.md)
 * [vSphere](/docs/vsphere.md)
 * Operating Systems
 * [Atomic](docs/atomic.md)
@@ -35,12 +35,12 @@ Below is a complete inventory example:
 ```
 ## Configure 'ip' variable to bind kubernetes services on a
 ## different ip than the default iface
-node1 ansible_ssh_host=95.54.0.12 ip=10.3.0.1
-node2 ansible_ssh_host=95.54.0.13 ip=10.3.0.2
-node3 ansible_ssh_host=95.54.0.14 ip=10.3.0.3
-node4 ansible_ssh_host=95.54.0.15 ip=10.3.0.4
-node5 ansible_ssh_host=95.54.0.16 ip=10.3.0.5
-node6 ansible_ssh_host=95.54.0.17 ip=10.3.0.6
+node1 ansible_host=95.54.0.12 ip=10.3.0.1
+node2 ansible_host=95.54.0.13 ip=10.3.0.2
+node3 ansible_host=95.54.0.14 ip=10.3.0.3
+node4 ansible_host=95.54.0.15 ip=10.3.0.4
+node5 ansible_host=95.54.0.16 ip=10.3.0.5
+node6 ansible_host=95.54.0.17 ip=10.3.0.6

 [kube-master]
 node1
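For comparison, the same `ansible_host`/`ip` split sketched in YAML inventory form (values reuse the INI example above):

```yaml
all:
  hosts:
    node1:
      ansible_host: 95.54.0.12  # address Ansible connects to
      ip: 10.3.0.1              # address Kubernetes components bind to
```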
@@ -70,7 +70,7 @@ The group variables to control main deployment options are located in the direct
 Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
 `inventory/sample/group_vars/k8s-cluster.yml`.
-There are also role vars for docker, rkt, kubernetes preinstall and master roles.
+There are also role vars for docker, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overridden from the group vars. In order to override, one should use
 the `-e ` runtime flags (most simple way) or other layers described in the docs.
@@ -175,7 +175,8 @@ simply add a line to your inventory, where you have to replace x.x.x.x with the
 bastion host.

 ```
-bastion ansible_ssh_host=x.x.x.x
+[bastion]
+bastion ansible_host=x.x.x.x
 ```

 For more information about Ansible and bastion hosts, read
docs/arch.md (new file)
@@ -0,0 +1,16 @@
+## Architecture compatibility
+
+The following table shows the impact of the CPU architecture on compatible features:
+- amd64: Cluster using only x86/amd64 CPUs
+- arm64: Cluster using only arm64 CPUs
+- amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs
+
+| kube_network_plugin | amd64 | arm64 | amd64 + arm64 |
+| ------------------- | ----- | ----- | ------------- |
+| Calico              | Y     | Y     | Y             |
+| Weave               | Y     | Y     | Y             |
+| Flannel             | Y     | N     | N             |
+| Canal               | Y     | N     | N             |
+| Cilium              | Y     | N     | N             |
+| Contiv              | Y     | N     | N             |
+| kube-router         | Y     | N     | N             |
@@ -32,7 +32,7 @@ The name of the resource group your instances are in, can be retrieved via `azur
 The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`

 #### azure\_subnet\_name
-The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list RESOURCE_GROUP VNET_NAME`
+The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`

 #### azure\_security\_group\_name
 The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
@@ -40,17 +40,36 @@ The name of the network security group your instances are in, can be retrieved v
 #### azure\_aad\_client\_id + azure\_aad\_client\_secret
 These will have to be generated first:
 - Create an Azure AD Application with:
-  `azure ad app create --name kubernetes --identifier-uris http://kubernetes --home-page http://example.com --password CLIENT_SECRET`
+  `azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
-  The name, identifier-uri, home-page and the password can be chosen
+  The display name, identifier-uri, homepage and the password can be chosen
   Note the AppId in the output.
 - Create Service principal for the application with:
-  `azure ad sp create --applicationId AppId`
+  `azure ad sp create --id AppId`
   This is the AppId from the last command
 - Create the role assignment with:
-  `azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
+  `azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`

 azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.

+#### azure\_loadbalancer\_sku
+Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
+
+#### azure\_exclude\_master\_from\_standard\_lb
+azure\_exclude\_master\_from\_standard\_lb excludes master nodes from the `standard` load balancer.
+
+#### azure\_disable\_outbound\_snat
+azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_loadbalancer\_sku is `standard`.
+
+#### azure\_primary\_availability\_set\_name
+(Optional) The name of the availability set that should be used as the load balancer backend. If this is set, the Azure
+cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and
+multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend
+pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field.
+
+#### azure\_use\_instance\_metadata
+Use instance metadata service where possible
+
+
 ## Provisioning Azure with Resource Group Templates

 You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
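For orientation, the options documented above might be combined in group vars roughly like this; values are illustrative, not defaults, and the availability set name is hypothetical:

```yaml
azure_loadbalancer_sku: standard
azure_exclude_master_from_standard_lb: true
azure_disable_outbound_snat: false
azure_primary_availability_set_name: my-agent-pool-as  # hypothetical set name
azure_use_instance_metadata: true
```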
@@ -119,13 +119,13 @@ recommended here:

 You need to edit your inventory and add:

-* `calico-rr` group with nodes in it. At the moment it's incompatible with
-  `kube-node` due to BGP port conflict with `calico-node` container. So you
-  should not have nodes in both `calico-rr` and `kube-node` groups.
+* `calico-rr` group with nodes in it. `calico-rr` can be combined with
+  `kube-node` and/or `kube-master`. `calico-rr` group also must be a child
+  group of `k8s-cluster` group.
 * `cluster_id` by route reflector node/group (see details
   [here](https://hub.docker.com/r/calico/routereflector/))

-Here's an example of Kubespray inventory with route reflectors:
+Here's an example of Kubespray inventory with standalone route reflectors:

 ```
 [all]
@@ -154,6 +154,7 @@ node5
 [k8s-cluster:children]
 kube-node
 kube-master
+calico-rr

 [calico-rr]
 rr0
docs/cni.md (new file)
@@ -0,0 +1,10 @@
+CNI
+==============
+
+This network plugin only unpacks CNI plugins version `cni_version` into `/opt/cni/bin` and instructs kubelet to use CNI, that is, it adds the following CLI params:
+
+`KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"`
+
+Its intended usage is for custom CNI configuration, e.g. manual routing tables + bridge + loopback CNI plugin outside kubespray scope. Furthermore, it's used for non-kubespray supported CNI plugins which you can install afterward.
+
+You are required to fill `/etc/cni/net.d` with valid CNI configuration after using kubespray.
@@ -114,10 +114,12 @@ The only exception is that ``hostNetwork: true`` PODs and non-k8s managed contai
 cluster service names.

 ## Nodelocal DNS cache
-Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns / core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
+Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).

 More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).

+**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
+
 Limitations
 -----------
@@ -129,9 +131,7 @@ Limitations

 * There is
   [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
-  for the SkyDNS ``ndots`` param via an
-  [option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
-  add-on, while SkyDNS supports it though.
+  for the SkyDNS ``ndots`` param.

 * the ``searchdomains`` have a limitation of a 6 names and 256 chars
   length. Due to default ``svc, default.svc`` subdomains, the actual
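As a sketch, toggling the cache described above is a single group var (enabled by default since the 2.10 release):

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml
enable_nodelocaldns: true  # set to false to fall back to direct core-dns queries
```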
@@ -3,23 +3,22 @@ Downloading binaries and containers

 Kubespray supports several download/upload modes. The default is:

-* Each node downloads binaries and container images on its own, which is
-  ``download_run_once: False``.
+* Each node downloads binaries and container images on its own, which is ``download_run_once: False``.
 * For K8s apps, pull policy is ``k8s_image_pull_policy: IfNotPresent``.
-* For system managed containers, like kubelet or etcd, pull policy is
-  ``download_always_pull: False``, which is pull if only the wanted repo and
-  tag/sha256 digest differs from that the host has.
+* For system managed containers, like kubelet or etcd, pull policy is ``download_always_pull: False``, which is pull if only the wanted repo and tag/sha256 digest differs from that the host has.

 There is also a "pull once, push many" mode as well:

-* Override the ``download_run_once: True`` to download container images only once
-  then push to cluster nodes in batches. The default delegate node
-  for pushing images is the first `kube-master`.
-* If your ansible runner node (aka the admin node) have password-less sudo and
-  docker enabled, you may want to define the ``download_localhost: True``, which
-  makes that node a delegate for pushing images while running the deployment with
-  ansible. This maybe the case if cluster nodes cannot access each over via ssh
-  or you want to use local docker images as a cache for multiple clusters.
+* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
+* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker.
+
+NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
+
+On caching:
+
+* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is as much as is needed for the largest image, which is currently slightly less than 150MB.
+* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the remote node to the local cache, or use that cache to pre-provision those nodes. To force the use of the cache, set `download_force_cache` to `True`.
+* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting download_keep_remote_cache will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.

 Container images and binary files are described by the vars like ``foo_version``,
 ``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
@@ -29,15 +28,14 @@ Container images may be defined by its repo and tag, for example:
 `andyshinn/dnsmasq:2.72`. Or by repo and tag and sha256 digest:
 `andyshinn/dnsmasq@sha256:7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193`.

-Note, the sha256 digest and the image tag must be both specified and correspond
+Note, the SHA256 digest and the image tag must be both specified and correspond
 to each other. The given example above is represented by the following vars:
-```
+```yaml
 dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
 dnsmasq_image_repo: andyshinn/dnsmasq
 dnsmasq_image_tag: '2.72'
 ```
-The full list of available vars may be found in the download's ansible role defaults.
-Those also allow to specify custom urls and local repositories for binaries and container
+The full list of available vars may be found in the download's ansible role defaults. Those also allow to specify custom urls and local repositories for binaries and container
 images as well. See also the DNS stack docs for the related intranet configuration,
 so the hosts can resolve those urls and repos.

@@ -46,9 +44,9 @@ so the hosts can resolve those urls and repos.
 In case your servers don't have access to internet (for example when deploying on premises with security constraints), you'll have, first, to setup the appropriate proxies/caches/mirrors and/or internal repositories and registries and, then, adapt the following variables to fit your environment before deploying:

 * At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
-  NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
+  NOTE: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
 * `pyrepo_index` (and optionally `pyrepo_cert`)
 * Depending on the `container_manager`
   * When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
   * When `container_manager=crio`, `crio_rhel_repo_base_url`
 * When using Helm, `helm_stable_repo_url`
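Pulling the knobs above together, a sketch of a "pull once, push many" configuration with local caching; variable names and defaults are the ones named in this doc, values are illustrative:

```yaml
download_run_once: true
download_localhost: false          # set true to download on the ansible node itself
download_cache_dir: /tmp/kubespray_cache
download_force_cache: false        # force cache use even when download_run_once is false
download_keep_remote_cache: false  # keep pre-provisioned images on the remote nodes
```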
@@ -6,7 +6,7 @@ Building your own inventory

 Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
 an example inventory located
-[here](https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/hosts.ini).
+[here](https://github.com/kubernetes-sigs/kubespray/blob/master/inventory/sample/inventory.ini).

 You can use an
 [inventory generator](https://github.com/kubernetes-sigs/kubespray/blob/master/contrib/inventory_builder/inventory.py)
@@ -20,7 +20,9 @@ Example inventory generator usage:

 cp -r inventory/sample inventory/mycluster
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+
+Then use `inventory/mycluster/hosts.yml` as inventory file.

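For orientation only, the generated `hosts.yml` begins with entries along these lines; this is a hypothetical fragment, and the builder also fills the kube-master, kube-node and etcd groups:

```yaml
all:
  hosts:
    node1:
      ansible_host: 10.10.1.3
      ip: 10.10.1.3
      access_ip: 10.10.1.3
```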
 Starting custom deployment
 --------------------------
@@ -30,7 +32,7 @@ and start the deployment:

 **IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:

-    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
+    ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \
       --private-key=~/.ssh/private_key

 See more details in the [ansible guide](ansible.md).
@@ -43,26 +45,33 @@ You may want to add worker, master or etcd nodes to your existing cluster. This
 - Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
 - Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`:

-    ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+    ansible-playbook -i inventory/mycluster/hosts.yml scale.yml -b -v \
       --private-key=~/.ssh/private_key

 Remove nodes
 ------------

-You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again.
-
-Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-
-    ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
-      --private-key=~/.ssh/private_key
-
-Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
+You may want to remove **master**, **worker**, or **etcd** nodes from your
+existing cluster. This can be done by re-running the `remove-node.yml`
+playbook. First, all specified nodes will be drained, then stop some
+kubernetes services and delete some certificates,
+and finally execute the kubectl command to delete these nodes.
+This can be combined with the add node function. This is generally helpful
+when doing something like autoscaling your clusters. Of course, if a node
+is not working, you can remove the node and install it again.
+
+Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.

 ```
-ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
   --private-key=~/.ssh/private_key \
   --extra-vars "node=nodename,nodename2"
 ```

+If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
+to skip the node reset step. If one node is unavailable, but others you wish
+to remove are able to connect via SSH, you could set reset_nodes=no as a host
+var in inventory.
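A sketch of the host-var form mentioned above, for removing a mix of reachable and dead nodes; the host name is hypothetical:

```yaml
all:
  hosts:
    nodename2:
      reset_nodes: no  # unreachable host: skip its reset step, still delete it from the cluster
```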

 Connecting to Kubernetes
 ------------------------

@@ -24,7 +24,7 @@ where an external LB or virtual IP management is inconvenient. This option is
 configured by the variable `loadbalancer_apiserver_localhost` (defaults to
 `True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
 You may also define the port the local internal loadbalancer uses by changing,
-`nginx_kube_apiserver_port`. This defaults to the value of
+`loadbalancer_apiserver_port`. This defaults to the value of
 `kube_apiserver_port`. It is also important to note that Kubespray will only
 configure kubelet and kube-proxy on non-master nodes to use the local internal
 loadbalancer.
@@ -114,7 +114,7 @@ Where:
 * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
 * `lc` - localhost;
 * `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
-* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
+* `nsp` - nginx secure port, `loadbalancer_apiserver_port`, defers to `sp`;
 * `sp` - secure port, `kube_apiserver_port`;
 * `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
 * `ip` - the node IP, defers to the ansible IP;
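As a sketch, pinning the local internal load balancer to a non-default port is a one-line group var; the port value here is illustrative, the default follows `kube_apiserver_port`:

```yaml
loadbalancer_apiserver_localhost: true
loadbalancer_apiserver_port: 8443  # illustrative; defaults to kube_apiserver_port
```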
docs/kube-ovn.md (new file)
@@ -0,0 +1,48 @@
+Kube-OVN
+===========
+Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
+
+For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn)
+
+## How to use it
+
+Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
+```
+...
+kube_network_plugin: kube-ovn
+...
+```
+
+## Verifying kube-ovn install
+
+Kube-OVN runs ovn and controller in the `kube-ovn` namespace
+
+* Check the status of kube-ovn pods
+
+```
+# From the CLI
+kubectl get pod -n kube-ovn
+
+# Output
+NAME                                   READY   STATUS    RESTARTS   AGE
+kube-ovn-cni-49lsm                     1/1     Running   0          2d20h
+kube-ovn-cni-9db8f                     1/1     Running   0          2d20h
+kube-ovn-cni-wftdk                     1/1     Running   0          2d20h
+kube-ovn-controller-68d7bb48bd-7tnvg   1/1     Running   0          2d21h
+ovn-central-6675dbb7d9-d7z8m           1/1     Running   0          4d16h
+ovs-ovn-hqn8p                          1/1     Running   0          4d16h
+ovs-ovn-hvpl8                          1/1     Running   0          4d16h
+ovs-ovn-r5frh                          1/1     Running   0          4d16h
+```
+
+* Check the default and node subnet
+
+```
+# From the CLI
+kubectl get subnet
+
+# Output
+NAME          PROTOCOL   CIDR            PRIVATE   NAT
+join          IPv4       100.64.0.0/16   false     false
+ovn-default   IPv4       10.16.0.0/16    false     true
+```
docs/macvlan.md (new file)
@@ -0,0 +1,48 @@
+Macvlan
+===============
+
+How to use it:
+-------------
+
+* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
+```
+...
+kube_network_plugin: macvlan
+...
+```
+
+* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
+```
+all:
+  hosts:
+    node1:
+      ip: 10.2.2.1
+      access_ip: 10.2.2.1
+      ansible_host: 10.2.2.1
+      macvlan_interface: ens5
+```
+
+Issues encountered:
+-------------
+
+- Service DNS
+
+  reply from unexpected source:
+
+  add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
+
+- Disable nodelocaldns
+
+  The nodelocal dns IP is not reachable.
+
+  Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
+```
+enable_nodelocaldns: false
+```
57
docs/recover-control-plane.md
Normal file
57
docs/recover-control-plane.md
Normal file
@@ -0,0 +1,57 @@
|
|||||||
Recovering the control plane
============================

To recover from broken nodes in the control plane, use the "recover-control-plane.yml" playbook.

* Backup what you can
* Provision new nodes to replace the broken ones
* Place the surviving nodes of the control plane first in the "etcd" and "kube-master" groups
* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube-master" groups (see the inventory sketch after this list)
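An inventory ordered that way might look like the following sketch (the host names are placeholders):

```
[etcd]
master1               # surviving node, listed first
master2-replacement
master3-replacement

[kube-master]
master1
master2-replacement
master3-replacement
```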
Examples of what broken means in this context:

* One or more bare metal node(s) suffer from unrecoverable hardware failure
* One or more node(s) fail during patching or upgrading
* Etcd database corruption
* Other node-related failures leaving your control plane degraded or nonfunctional

__Note that you need at least one functional node to be able to recover using this method.__

## If etcd quorum is intact

* Set the etcd member names of the broken node(s) in the variable "old_etcd_members"; this variable is used to remove the broken nodes from the etcd cluster.
  ```old_etcd_members=etcd2,etcd3```
* If you reuse identities for your etcd nodes, add the inventory names for those nodes to the variable "old_etcds". This will remove any previously generated certificates for those nodes.
  ```old_etcds=etcd2.example.com,etcd3.example.com```
* If you would like to remove the broken node objects from the kubernetes cluster, add their inventory names to the variable "old_kube_masters".
  ```old_kube_masters=master2.example.com,master3.example.com```

Then run the playbook with ```--limit etcd,kube-master```.
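Put together, the invocation might look like this (a sketch; the inventory path and node names are placeholders for your own):

```
ansible-playbook -i inventory/mycluster/hosts.ini recover-control-plane.yml \
  -e "old_etcd_members=etcd2,etcd3" \
  -e "old_etcds=etcd2.example.com,etcd3.example.com" \
  -e "old_kube_masters=master2.example.com,master3.example.com" \
  --limit etcd,kube-master
```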
When finished you should have a fully working and highly available control plane again.

## If etcd quorum is lost

* If you reuse identities for your etcd nodes, add the inventory names for those nodes to the variable "old_etcds". This will remove any previously generated certificates for those nodes.
  ```old_etcds=etcd2.example.com,etcd3.example.com```
* If you would like to remove the broken node objects from the kubernetes cluster, add their inventory names to the variable "old_kube_masters".
  ```old_kube_masters=master2.example.com,master3.example.com```

Then run the playbook with ```--limit etcd,kube-master```.

When finished you should have a fully working and highly available control plane again.

The playbook will attempt to take a snapshot from the first node in the "etcd" group and restore from that. If you would like to restore from an alternate snapshot, set the path to that snapshot in the "etcd_snapshot" variable.

```etcd_snapshot=/tmp/etcd_snapshot```
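For example, restoring from a specific snapshot (a sketch; paths and names are placeholders):

```
ansible-playbook -i inventory/mycluster/hosts.ini recover-control-plane.yml \
  -e "old_etcds=etcd2.example.com,etcd3.example.com" \
  -e "etcd_snapshot=/tmp/etcd_snapshot" \
  --limit etcd,kube-master
```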
## Caveats

* The playbook has only been tested on control planes where the etcd and kube-master nodes are the same; it will warn if run on a cluster with separate etcd and kube-master nodes.
* The playbook has only been tested with fairly small etcd databases.
* If your new control plane nodes have new IP addresses, you may have to change settings in various places.
* There may be disruptions while running the playbook.
* There are absolutely no guarantees.

If possible, try to break a cluster in the same way that your target cluster is broken and test recovering it before trying on the real target cluster.
@@ -2,7 +2,7 @@ Kubespray's roadmap
 =================

 ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
-- the playbook would install and configure docker/rkt and the etcd cluster
+- the playbook would install and configure docker and the etcd cluster
 - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
 - a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
 - to be discussed, a way to provide the inventory
@@ -13,6 +13,7 @@ Kubespray's roadmap
 - [ ] GCE
 - [x] AWS (contrib/terraform/aws)
 - [x] Openstack (contrib/terraform/openstack)
+- [x] Packet
 - [ ] Digital Ocean
 - [ ] Azure
 - [ ] On AWS autoscaling, multi AZ
@@ -23,11 +24,11 @@ Kubespray's roadmap
 https://github.com/kubernetes/kubernetes/issues/18112)

 ### Tests
-- [ ] Run kubernetes e2e tests
+- [x] Run kubernetes e2e tests
 - [ ] Test idempotency on single OS but for all network plugins/container engines
 - [ ] single test on AWS per day
 - [ ] test scale up cluster: +1 etcd, +1 master, +1 node
-- [ ] Reorganize CI test vars into group var files
+- [x] Reorganize CI test vars into group var files

 ### Lifecycle
 - [ ] Upgrade granularity: select components to upgrade and skip others
@@ -42,23 +43,10 @@ Kubespray's roadmap
 - Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory

 ### Addons (helm or native ansible)
-Include optionals deployments to init the cluster:
-##### Monitoring
-- Heapster / Grafana ....
-- **Prometheus**
-##### Others
-##### Dashboards:
-- kubernetes-dashboard
-- Fabric8
-- Tectonic
-- Cockpit
-##### Paas like
-- Openshift Origin
-- Openstack
-- Deis Workflow
+- [x] Helm
+- [x] Ingress-nginx
+- [x] kubernetes-dashboard

 ### Others
 - Organize and update documentation (split in categories)
@@ -1,24 +1,3 @@
-Travis CI test matrix
-=====================
-
-GCE instances
--------------
-
-Here is the test matrix for the CI gates:
-
-| Network plugin| OS type| GCE region| Nodes layout|
-|-------------------------|-------------------------|-------------------------|-------------------------|
-| canal| debian-8-kubespray| asia-east1-a| ha-scale|
-| calico| debian-8-kubespray| europe-west1-c| default|
-| flannel| centos-7| asia-northeast1-c| default|
-| calico| centos-7| us-central1-b| ha|
-| weave| rhel-7| us-east1-c| default|
-| canal| coreos-stable| us-west1-b| ha-scale|
-| canal| rhel-7| asia-northeast1-b| separate|
-| weave| ubuntu-1604-xenial| europe-west1-d| separate|
-| calico| coreos-stable| us-central1-f| separate|
-
 Node Layouts
 ------------

@@ -41,15 +20,6 @@ never actually deployed, but certificates are generated for them.

 Note, the canal network plugin deploys flannel as well plus calico policy controller.

-Hint: the command
-```
-bash scripts/gen_matrix.sh
-```
-will (hopefully) generate the CI test cases from the current ``.travis.yml``.
-
-Gitlab CI test matrix
-=====================
-
 GCE instances
 -------------
docs/vagrant.md
@@ -1,69 +1,129 @@
-Vagrant Install
-=================
+Introduction
+============

-Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
-with vmware, but is untested) you should be able to launch a 3 node
-Kubernetes cluster by simply running `$ vagrant up`.<br />
-
-This will spin up 3 VMs and install kubernetes on them. Once they are
-completed you can connect to any of them by running <br />
-`$ vagrant ssh k8s-0[1..3]`.
+Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (vmware may work, but is untested), you should be able to launch a 3-node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install kubernetes on them. Once they are completed, you can connect to any of them by running `vagrant ssh k8s-[1..3]`.
+
+To give an estimate of the expected duration of a provisioning run: on a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time.
+
+For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, also make sure that any tmpfs devices that are mounted are mostly empty, and disable any swapfiles mounted on HDD/SSD, or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well.

-```
-$ vagrant up
-Bringing machine 'k8s-01' up with 'virtualbox' provider...
-Bringing machine 'k8s-02' up with 'virtualbox' provider...
-Bringing machine 'k8s-03' up with 'virtualbox' provider...
-==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
-...
-...
-k8s-03: Running ansible-playbook...
-
-PLAY [k8s-cluster] *************************************************************
-
-TASK [setup] *******************************************************************
-ok: [k8s-03]
-ok: [k8s-01]
-ok: [k8s-02]
-...
-...
-PLAY RECAP *********************************************************************
-k8s-01 : ok=157 changed=66 unreachable=0 failed=0
-k8s-02 : ok=137 changed=59 unreachable=0 failed=0
-k8s-03 : ok=86 changed=51 unreachable=0 failed=0
-
-$ vagrant ssh k8s-01
-vagrant@k8s-01:~$ kubectl get nodes
-NAME      STATUS    AGE
-k8s-01    Ready     45s
-k8s-02    Ready     45s
-k8s-03    Ready     45s
-```

 Customize Vagrant
 =================

-You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
-or through an override file.
-
-In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
-
-You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
-e.g.:
-
-    echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
-
-and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
+You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a `config.rb` file in it. An example of how to configure this file is given below.

 Use alternative OS for Vagrant
 ==============================

-By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
-operating system for your local cluster.
+By default, Vagrant uses an Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster.

 Customize `$os` variable in `Vagrantfile` or as override, e.g.,:

     echo '$os = "coreos-stable"' >> vagrant/config.rb

 The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.

+File and image caching
+======================
+
+Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes and, as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see the [download documentation](docs/downloads.md).
+
+Example use of Vagrant
+======================
+
+The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repository and run it by executing `source <name_of_the_file>`.
+
+```
+# use virtualenv to install all python requirements
+VENVDIR=venv
+virtualenv --python=/usr/bin/python3.7 $VENVDIR
+source $VENVDIR/bin/activate
+pip install -r requirements.txt
+
+# prepare an inventory to test with
+INV=inventory/my_lab
+rm -rf ${INV}.bak &> /dev/null
+mv ${INV} ${INV}.bak &> /dev/null
+cp -a inventory/sample ${INV}
+rm -f ${INV}/hosts.ini
+
+# customize the vagrant environment
+mkdir vagrant
+cat << EOF > vagrant/config.rb
+\$instance_name_prefix = "kub"
+\$vm_cpus = 1
+\$num_instances = 3
+\$os = "centos-bento"
+\$subnet = "10.0.20"
+\$network_plugin = "flannel"
+\$inventory = "$INV"
+\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
+EOF
+
+# make the rpm cache
+mkdir -p temp/docker_rpms
+
+vagrant up
+
+# make a copy of the downloaded docker rpm, to speed up the next provisioning run
+scp kub-1:/var/cache/yum/x86_64/7/docker-ce/packages/* temp/docker_rpms/
+
+# copy kubectl access configuration in place
+mkdir $HOME/.kube/ &> /dev/null
+ln -s $INV/artifacts/admin.conf $HOME/.kube/config
+# make the kubectl binary available
+sudo ln -s $INV/artifacts/kubectl /usr/local/bin/kubectl
+# or
+export PATH=$PATH:$INV/artifacts
+```
+
+If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible:
+
+```
+ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
+```
+
+If all went well, check that everything is working as expected:
+
+```
+kubectl get nodes
+```
+
+The output should look like this:
+
+```
+$ kubectl get nodes
+NAME    STATUS   ROLES    AGE   VERSION
+kub-1   Ready    master   32m   v1.14.1
+kub-2   Ready    master   31m   v1.14.1
+kub-3   Ready    <none>   31m   v1.14.1
+```
+
+Another nice test is the following:
+
+```
+kubectl get po --all-namespaces -o wide
+```
+
+Which should yield something like the following:
+
+```
+NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
+kube-system   coredns-97c4b444f-9wm86                 1/1     Running   0          31m   10.233.66.2   kub-3    <none>           <none>
+kube-system   coredns-97c4b444f-g7hqx                 0/1     Pending   0          30m   <none>        <none>   <none>           <none>
+kube-system   dns-autoscaler-5fc5fdbf6-5c48k          1/1     Running   0          31m   10.233.66.3   kub-3    <none>           <none>
+kube-system   kube-apiserver-kub-1                    1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-apiserver-kub-2                    1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-controller-manager-kub-1           1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-controller-manager-kub-2           1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-flannel-8tgcn                      2/2     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   kube-flannel-b2hgt                      2/2     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-flannel-zx4bc                      2/2     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-proxy-4bjdn                        1/1     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kube-proxy-l5tt5                        1/1     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   kube-proxy-x59q8                        1/1     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-scheduler-kub-1                    1/1     Running   0          32m   10.0.20.101   kub-1    <none>           <none>
+kube-system   kube-scheduler-kub-2                    1/1     Running   0          32m   10.0.20.102   kub-2    <none>           <none>
+kube-system   kubernetes-dashboard-6c7466966c-jqz42   1/1     Running   0          31m   10.233.66.4   kub-3    <none>           <none>
+kube-system   nginx-proxy-kub-3                       1/1     Running   0          32m   10.0.20.103   kub-3    <none>           <none>
+kube-system   nodelocaldns-2x7vh                      1/1     Running   0          31m   10.0.20.102   kub-2    <none>           <none>
+kube-system   nodelocaldns-fpvnz                      1/1     Running   0          31m   10.0.20.103   kub-3    <none>           <none>
+kube-system   nodelocaldns-h2f42                      1/1     Running   0          31m   10.0.20.101   kub-1    <none>           <none>
+```
+
+Create clusteradmin rbac and get the login token for the dashboard:
+
+```
+kubectl create -f contrib/misc/clusteradmin-rbac.yml
+kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$'
+```
+
+Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login).
docs/vars.md
@@ -57,10 +57,16 @@ following default cluster parameters:
 10.233.0.0/18). Must not overlap with kube_pods_subnet
 * *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
   overlap with kube_service_addresses.
-* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remainin
+* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
   bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
 * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
 * *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
+* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
+  on the CoreDNS service.
+* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
+  (default is k8s_external.local)
+* *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures the endpoint_pod_names option for the kubernetes plugin
+  on the CoreDNS service.
 * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
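Enabling the k8s_external plugin through these vars might look like this in `group_vars/k8s-cluster/k8s-cluster.yml` (a sketch; the zone shown is the documented default):

```
enable_coredns_k8s_external: true
coredns_k8s_external_zone: k8s_external.local
```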
@@ -98,11 +104,12 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m

 * *docker_options* - Commonly used to set
   ``--insecure-registry=myregistry.mydomain:5000``
+* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
 * *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
   proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
   that correspond to each node.
 * *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
-  Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
+  Available options are ``host`` and ``docker``. ``docker`` mode
   is unlikely to work on newer releases. Starting with Kubernetes v1.7
   series, this now defaults to ``host``. Before v1.7, the default was Docker.
   This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
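For instance, a deployment behind a corporate proxy might set (a sketch; the proxy URL is a placeholder):

```
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
```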
@@ -113,15 +120,17 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
 * *kubelet_cgroup_driver* - Allows manual override of the
   cgroup-driver option for Kubelet. By default autodetection is used
   to match Docker configuration.
 * *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
   from the kube-apiserver when the certificate expiration approaches.
 * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
   For example, labels can be set in the inventory as variables or more widely in group_vars.
-  *node_labels* must be defined as a dict:
+  *node_labels* can be defined either as a dict or a comma-separated labels string:
 ```
 node_labels:
   label1_name: label1_value
   label2_name: label2_value
+
+node_labels: "label1_name=label1_value,label2_name=label2_value"
 ```
 * *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter.
   For example, taints can be set in the inventory as variables or more widely in group_vars.
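For *node_taints*, the values are `key=value:effect` strings passed straight to the kubelet; a sketch (the key and effect are illustrative):

```
node_taints:
  - "node.example.com/external=true:NoSchedule"
```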
@@ -27,12 +27,6 @@
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}

-- hosts: k8s-cluster:etcd:calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  vars:
-    ansible_ssh_pipelining: true
-  gather_facts: true
-
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
@@ -37,6 +37,7 @@
     loadSidebar: 'docs/_sidebar.md',
     repo: 'https://github.com/kubernetes-sigs/kubespray',
     auto2top: true,
+    logo: '/logo/logo-clear.png'
   }
 </script>
 <script src="//unpkg.com/docsify/lib/docsify.min.js"></script>
@@ -12,3 +12,4 @@ node1
 [k8s-cluster:children]
 kube-node
 kube-master
+calico-rr
@@ -2,6 +2,9 @@
 ## Directory where etcd data stored
 etcd_data_dir: /var/lib/etcd

+## Experimental kubeadm etcd deployment mode. Available only for new deployments
+etcd_kubeadm_enabled: false
+
 ## Directory where the binaries will be installed
 bin_dir: /usr/local/bin
@@ -21,12 +24,15 @@ bin_dir: /usr/local/bin

 ## Internal loadbalancers for apiservers
 # loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx

 ## Local loadbalancer should use this port
 ## And must be set port 6443
-nginx_kube_apiserver_port: 6443
-## If nginx_kube_apiserver_healthcheck_port variable defined, enables proxy liveness check.
-nginx_kube_apiserver_healthcheck_port: 8081
+loadbalancer_apiserver_port: 6443
+## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081

 ### OTHER OPTIONAL VARIABLES
 ## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
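Switching the local apiserver load balancer to haproxy would then be a matter of (a sketch based on the variables above):

```
loadbalancer_apiserver_type: haproxy
loadbalancer_apiserver_port: 6443
```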
@@ -1,12 +1,12 @@
-# # When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
 # openstack_blockstorage_version: "v1/v2/auto (default)"
 # openstack_blockstorage_ignore_volume_az: yes
-# # When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
 # openstack_lbaas_enabled: True
 # openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-# # To enable automatic floating ip provisioning, specify a subnet.
+## To enable automatic floating ip provisioning, specify a subnet.
 # openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-# # Override default LBaaS behavior
+## Override default LBaaS behavior
 # openstack_lbaas_use_octavia: False
 # openstack_lbaas_method: "ROUND_ROBIN"
 # openstack_lbaas_provider: "haproxy"
@@ -35,6 +35,8 @@ local_volume_provisioner_enabled: false
 #   local-storage:
 #     host_dir: /mnt/disks
 #     mount_dir: /mnt/disks
+#     volume_mode: Filesystem
+#     fs_type: ext4
 #   fast-disks:
 #     host_dir: /mnt/fast-disks
 #     mount_dir: /mnt/fast-disks
@@ -56,11 +58,31 @@ cephfs_provisioner_enabled: false
 # cephfs_provisioner_claim_root: /volumes
 # cephfs_provisioner_deterministic_names: true

+# RBD provisioner deployment
+rbd_provisioner_enabled: false
+# rbd_provisioner_namespace: rbd-provisioner
+# rbd_provisioner_replicas: 2
+# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# rbd_provisioner_pool: kube
+# rbd_provisioner_admin_id: admin
+# rbd_provisioner_secret_name: ceph-secret-admin
+# rbd_provisioner_secret: ceph-key-admin
+# rbd_provisioner_user_id: kube
+# rbd_provisioner_user_secret_name: ceph-secret-user
+# rbd_provisioner_user_secret: ceph-key-user
+# rbd_provisioner_user_secret_namespace: rbd-provisioner
+# rbd_provisioner_fs_type: ext4
+# rbd_provisioner_image_format: "2"
+# rbd_provisioner_image_features: layering
+# rbd_provisioner_storage_class: rbd
+# rbd_provisioner_reclaim_policy: Delete
+
 # Nginx ingress controller deployment
 ingress_nginx_enabled: false
 # ingress_nginx_host_network: false
+ingress_publish_status_address: ""
 # ingress_nginx_nodeselector:
-#   node-role.kubernetes.io/node: ""
+#   beta.kubernetes.io/os: "linux"
 # ingress_nginx_tolerations:
 #   - key: "node-role.kubernetes.io/master"
 #     operator: "Equal"
@@ -75,7 +97,7 @@ ingress_nginx_enabled: false
 # ingress_nginx_configmap_tcp_services:
 #   9000: "default/example-go:8080"
 # ingress_nginx_configmap_udp_services:
-#   53: "kube-system/kube-dns:53"
+#   53: "kube-system/coredns:53"

 # Cert manager deployment
 cert_manager_enabled: false
@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true

 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.13.5
+kube_version: v1.15.11

 # kubernetes image repo define
 kube_image_repo: "gcr.io/google-containers"
@@ -70,7 +70,7 @@ kube_users:
 # kube_oidc_groups_prefix: oidc:

-# Choose network plugin (cilium, calico, contiv, weave or flannel)
+# Choose network plugin (cilium, calico, contiv, weave or flannel. Use cni for generic cni plugin)
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: calico
@@ -132,8 +132,14 @@ dns_mode: coredns
 # Set manual server if using a custom cluster DNS server
 # manual_dns_server: 10.x.x.x
 # Enable nodelocal dns cache
-enable_nodelocaldns: False
+enable_nodelocaldns: true
 nodelocaldns_ip: 169.254.25.10
+nodelocaldns_health_port: 9254
+# Enable k8s_external plugin for CoreDNS
+enable_coredns_k8s_external: false
+coredns_k8s_external_zone: k8s_external.local
+# Enable endpoint_pod_names option for kubernetes plugin
+enable_coredns_k8s_endpoint_pod_names: false

 # Can be docker_dns, host_resolvconf or none
 resolvconf_mode: docker_dns
@@ -145,7 +151,7 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
 dns_domain: "{{ cluster_name }}"

 ## Container runtime
-## docker for docker and crio for cri-o.
+## docker for docker, crio for cri-o and containerd for containerd.
 container_manager: docker

 ## Settings for containerized control plane (etcd/kubelet/secrets)
@@ -153,6 +159,10 @@ etcd_deployment_type: docker
 kubelet_deployment_type: host
 helm_deployment_type: host

+# Enable kubeadm experimental control plane
+kubeadm_control_plane: false
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent
@@ -183,6 +193,18 @@ podsecuritypolicy_enabled: false
 # Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
 # kubelet_enforce_node_allocatable: pods

+## Optionally reserve resources for OS system daemons.
+# system_reserved: true
+## Uncomment to override default values
+# system_memory_reserved: 512M
+# system_cpu_reserved: 500m
+## Reservation for master hosts
+# system_master_memory_reserved: 256M
+# system_master_cpu_reserved: 250m
+
+# An alternative flexvolume plugin directory
+# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+
 ## Supplementary addresses that can be added in kubernetes ssl keys.
 ## That can be useful for example to setup a keepalived virtual IP
 # supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
@@ -24,3 +24,12 @@
 # Advertise Cluster IPs
 # calico_advertise_cluster_ips: true
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "etcd"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Number of typha replicas
+# typha_replicas: 1
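Opting into the kubernetes datastore with typha enabled might then look like this (a sketch based on the variables above; the replica count is illustrative):

```
calico_datastore: "kdd"
typha_enabled: true
typha_replicas: 2
```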
@@ -0,0 +1,6 @@
---
# private interface, on a l2-network
macvlan_interface: "eth1"

# Enable NAT in default gateway network interface
enable_nat_default_gateway: true
@@ -28,6 +28,9 @@
 # node5
 # node6

+[calico-rr]
+
 [k8s-cluster:children]
 kube-master
 kube-node
+calico-rr
logo/LICENSE (new file)
@@ -0,0 +1 @@
# The Kubespray logo files are licensed under a choice of either Apache-2.0 or CC-BY-4.0 (Creative Commons Attribution 4.0 International).

logo/OWNERS (new file)
@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - thomeced

logo/logo-clear.png (new binary file, 4.6 KiB; not shown)
Some files were not shown because too many files have changed in this diff.