Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-13 21:34:40 +03:00)

Compare commits: 455 commits, release-2. ... test/flatc
@@ -37,3 +37,9 @@ exclude_paths:
- tests/files/custom_cni/cilium.yaml
- venv
- .github
- .ansible
- .cache
- .gitlab-ci.yml
- .gitlab-ci
mock_modules:
- gluster.gluster.gluster_volume

28  .github/ISSUE_TEMPLATE/bug-report.yaml (vendored)

@@ -36,11 +36,35 @@ body:
attributes:
value: '### Environment'

- type: textarea
- type: dropdown
id: os
attributes:
label: OS
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
options:
- 'RHEL 9'
- 'RHEL 8'
- 'Fedora 40'
- 'Ubuntu 24'
- 'Ubuntu 22'
- 'Ubuntu 20'
- 'Debian 12'
- 'Debian 11'
- 'Flatcar Container Linux'
- 'openSUSE Leap'
- 'openSUSE Tumbleweed'
- 'Oracle Linux 9'
- 'Oracle Linux 8'
- 'AlmaLinux 9'
- 'AlmaLinux 8'
- 'Rocky Linux 9'
- 'Rocky Linux 8'
- 'Amazon Linux 2'
- 'Kylin Linux Advanced Server V10'
- 'UOS Linux 20'
- 'openEuler 24'
- 'openEuler 22'
- 'openEuler 20'
- 'Other|Unsupported'
validations:
required: true

1  .github/ISSUE_TEMPLATE/config.yml (vendored)

@@ -1,4 +1,5 @@
---
blank_issues_enabled: false
contact_links:
- name: Support Request
url: https://kubernetes.slack.com/channels/kubespray

12  .github/dependabot.yml (vendored)

@@ -7,3 +7,15 @@ updates:
labels:
- dependencies
- release-note-none
groups:
molecule:
patterns:
- molecule
- molecule-plugins*
- package-ecosystem: "github-actions"
directory: "/"
labels:
- release-note-none
- ci-short
schedule:
interval: "weekly"

32  .github/workflows/auto-label-os.yml (vendored, new file)

@@ -0,0 +1,32 @@
name: Issue labeler
on:
issues:
types: [opened]

permissions:
contents: read

jobs:
label-component:
runs-on: ubuntu-latest
permissions:
issues: write

steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

- name: Parse issue form
uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
id: issue-parser
with:
template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

- name: Set labels based on OS field
uses: redhat-plumbers-in-action/advanced-issue-labeler@39087a4b30cb98d57f25f34d617a6af8163c17d9
with:
issue-form: ${{ steps.issue-parser.outputs.jsonString }}
section: os
block-list: |
None
Other
token: ${{ secrets.GITHUB_TOKEN }}

54  .github/workflows/upgrade-patch-versions-schedule.yml (vendored, new file)

@@ -0,0 +1,54 @@
name: Upgrade Kubespray components with new patches versions - all branches

on:
schedule:
- cron: '22 2 * * *' # every day, 02:22 UTC
workflow_dispatch:

permissions: {}
jobs:
get-releases-branches:
runs-on: ubuntu-latest
outputs:
branches: ${{ steps.get-branches.outputs.data }}
steps:
- uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
id: get-branches
with:
query: |
query get_release_branches($owner:String!, $name:String!) {
repository(owner:$owner, name:$name) {
refs(refPrefix: "refs/heads/",
first: 0, # TODO increment once we have release branch with the new checksums format
query: "release-",
orderBy: {
field: ALPHABETICAL,
direction: DESC
}) {
nodes {
name
}
}
}
}
variables: |
owner: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

update-versions:
needs: get-releases-branches
strategy:
fail-fast: false
matrix:
branch:
- name: ${{ github.event.repository.default_branch }}
- ${{ fromJSON(needs.get-releases-branches.outputs.branches).repository.refs.nodes }}
uses: ./.github/workflows/upgrade-patch-versions.yml
permissions:
contents: write
pull-requests: write
name: Update patch updates on ${{ matrix.branch.name }}
with:
branch: ${{ matrix.branch.name }}

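The branch query above can be tried out locally before raising `first: 0`; a hedged sketch using the GitHub CLI (`gh`, not part of the workflow itself — the owner/name values are only illustrations):

```ShellSession
# Run the same GraphQL query the octokit/graphql-action step executes,
# but with first: 10 so it actually returns release-* branches.
gh api graphql \
  -f query='query($owner: String!, $name: String!) {
      repository(owner: $owner, name: $name) {
        refs(refPrefix: "refs/heads/", first: 10, query: "release-",
             orderBy: {field: ALPHABETICAL, direction: DESC}) {
          nodes { name }
        }
      }
    }' \
  -f owner=kubernetes-sigs -f name=kubespray
```
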
44  .github/workflows/upgrade-patch-versions.yml (vendored, new file)

@@ -0,0 +1,44 @@
on:
workflow_call:
inputs:
branch:
description: Which branch to update with new patch versions
default: master
required: true
type: string

jobs:
update-patch-versions:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ inputs.branch }}
- uses: actions/setup-python@v5
with:
python-version: '3.13'
cache: 'pip'
- run: pip install scripts/component_hash_update pre-commit
- run: update-hashes
env:
API_KEY: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/cache@v4
with:
key: pre-commit-hook-propagate
path: |
~/.cache/pre-commit
- run: pre-commit run --all-files propagate-ansible-variables
continue-on-error: true
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
with:
commit-message: Patch versions updates
title: Patch versions updates - ${{ inputs.branch }}
labels: bot
branch: component_hash_update/${{ inputs.branch }}
sign-commits: true
body: |
/kind feature

```release-note
NONE
```

@@ -6,37 +6,37 @@ stages:
- deploy-extended

variables:
KUBESPRAY_VERSION: v2.25.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
GIT_CONFIG_COUNT: 2
GIT_CONFIG_KEY_0: user.email
GIT_CONFIG_VALUE_0: "ci@kubespray.io"
GIT_CONFIG_KEY_1: user.name
GIT_CONFIG_VALUE_1: "Kubespray CI"
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
ANSIBLE_REMOTE_USER: kubespray
ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
ANSIBLE_INVENTORY: /tmp/inventory
RESET_CHECK: "false"
REMOVE_NODE_CHECK: "false"
UPGRADE_TEST: "false"
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
ANSIBLE_VERBOSITY: 2
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
TERRAFORM_VERSION: 1.3.7
TF_VERSION: 1.3.7
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

before_script:
- ./tests/scripts/rebase.sh
- mkdir -p /.ssh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY

.job: &job
tags:
@@ -48,29 +48,17 @@ before_script:
- cluster-dump/
needs:
- pipeline-image
variables:
ANSIBLE_STDOUT_CALLBACK: "debug"

.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- check-galaxy-version # lint
- pre-commit # lint
- vagrant-validate # lint

.testcases: &testcases
extends: .job-moderated
retry: 1
interruptible: true
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
@@ -102,6 +90,6 @@ include:
- .gitlab-ci/build.yml
- .gitlab-ci/lint.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/kubevirt.yml
- .gitlab-ci/vagrant.yml
- .gitlab-ci/molecule.yml

@@ -1,5 +1,5 @@
---
.build-container:
pipeline-image:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
@@ -11,22 +11,19 @@
name: gcr.io/kaniko-project/executor:debug
entrypoint: ['']
variables:
TAG: $CI_COMMIT_SHORT_SHA
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
# TODO: remove the override
# currently rebase.sh depends on bash (not available in the kaniko image)
# once we have a simpler rebase (which should be easy if the target branch ref is available as variable
# we'll be able to rebase here as well hopefully
before_script: []
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--context $CI_PROJECT_DIR
--dockerfile $CI_PROJECT_DIR/pipeline.Dockerfile
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE

pipeline-image:
extends: .build-container
variables:
DOCKERFILE: pipeline.Dockerfile
--log-timestamp=true

148  .gitlab-ci/kubevirt.yml (new file)

@@ -0,0 +1,148 @@
---
.kubevirt:
extends: .job-moderated
interruptible: true
script:
- ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-c local -e @"tests/files/${TESTCASE}.yml"
- ./tests/scripts/testcases_run.sh
variables:
ANSIBLE_TIMEOUT: "120"
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized

# TODO: generate testcases matrixes from the files in tests/files/
# this is needed to avoid the need for PR rebasing when a job was added or removed in the target branch
# (currently, a removed job in the target branch breaks the tests, because the
# pipeline definition is parsed by gitlab before the rebase.sh script)
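A minimal sketch of how that TODO's matrix generation could look, assuming a hypothetical helper run at pipeline-generation time (the filenames come straight from tests/files/; nothing like this exists in the change itself):

```ShellSession
# Hypothetical generator: one TESTCASE matrix entry per tests/files/<name>.yml,
# so adding or removing a testcase file never requires editing the pipeline.
printf 'parallel:\n  matrix:\n    - TESTCASE:\n'
for f in tests/files/*.yml; do
  printf '        - %s\n' "$(basename "$f" .yml)"
done
```
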
# CI template for PRs
pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- almalinux8-calico
- almalinux9-crio
- almalinux9-kube-ovn
- debian11-calico-collection
- debian11-macvlan
- debian12-cilium
- fedora39-kube-router
# FIXME: this test is broken (perma-failing)
- openeuler24-calico
- opensuse15-6-calico
- rockylinux8-calico
- rockylinux9-cilium
- ubuntu20-calico-all-in-one-hardening
- ubuntu20-cilium-sep
- ubuntu20-flannel-collection
- ubuntu20-kube-router-sep
- ubuntu20-kube-router-svc-proxy
- ubuntu22-calico-all-in-one
- ubuntu22-calico-all-in-one-upgrade
- ubuntu24-calico-etcd-datastore
- flatcar4081-calico

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .kubevirt
variables:
TESTCASE: ubuntu20-calico-all-in-one
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true

pr_full:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico-ha-ebpf
- almalinux9-calico-nodelocaldns-secondary
- debian11-custom-cni
- debian11-kubelet-csr-approver
- debian12-custom-cni-helm
- fedora39-calico-swap-selinux
- fedora39-crio
- ubuntu20-all-in-one-docker
- ubuntu20-calico-ha-wireguard
- ubuntu20-flannel-ha
- ubuntu20-flannel-ha-once

# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
manual:
extends: pr_full
parallel:
matrix:
- TESTCASE:
- amazon-linux-2-all-in-one
rules:
- when: manual
allow_failure: true

pr_extended:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico
- almalinux9-calico-remove-node
- almalinux9-docker
- debian11-docker
- debian12-calico
- debian12-docker
- opensuse15-6-docker-cilium
- rockylinux9-calico
- ubuntu20-calico-etcd-kubeadm
- ubuntu20-flannel
- ubuntu22-all-in-one-docker
- ubuntu24-all-in-one-docker
- ubuntu24-calico-all-in-one

# Enabled when PERIODIC_CI_ENABLED var is set
periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- debian11-calico-upgrade
- debian11-calico-upgrade-once
- debian12-cilium-svc-proxy
- fedora39-calico-selinux
- fedora40-docker-calico
- ubuntu20-calico-etcd-kubeadm-upgrade-ha
- ubuntu20-calico-ha-recover
- ubuntu20-calico-ha-recover-noquorum

@@ -3,15 +3,16 @@ pre-commit:
stage: test
tags:
- ffci
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
script:
- pre-commit run --all-files
- pre-commit run --all-files --show-diff-on-failure
cache:
key: pre-commit-all
key: pre-commit-2
paths:
- /pre-commit-cache
- ${PRE_COMMIT_HOME}
when: 'always'
needs: []

vagrant-validate:
@@ -23,13 +24,3 @@ vagrant-validate:
script:
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']


# TODO: convert to pre-commit hook
check-galaxy-version:
needs: []
stage: test
tags: [ffci]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh

@@ -1,26 +1,13 @@
---
.molecule:
tags: [ffci-vm-med]
tags: [ffci]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
services: []
stage: deploy-part1
needs: []
image: $PIPELINE_IMAGE
needs:
- pipeline-image
# - ci-not-authorized
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
before_script:
- groups
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
after_script:
@@ -30,66 +17,38 @@
paths:
- molecule_logs/

molecule:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i $ROLE
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set

.molecule_periodic:
molecule_full:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule

molecule_full:
extends: .molecule_periodic

molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success

molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success

molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success

molecule_cri-o:
extends: .molecule
stage: deploy-part1
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success

# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
# extends: .molecule
# stage: deploy-extended
# script:
# - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
# when: manual
# # FIXME: this test is broken (perma-failing)

molecule_gvisor:
extends: .molecule
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual
# FIXME: this test is broken (perma-failing)

molecule_youki:
extends: .molecule
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual
# FIXME: this test is broken (perma-failing)
extends: molecule
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os
# FIXME : tests below are perma-failing
- container-engine/kata-containers
- container-engine/gvisor
- container-engine/youki

@@ -1,252 +0,0 @@
---
.packet:
extends: .testcases
variables:
ANSIBLE_TIMEOUT: "120"
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized

# CI template for PRs
.packet_pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .packet

## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one

.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true

.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true

.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .packet

packet_cleanup_old:
stage: deploy-part1
extends: .packet_periodic
script:
- cd tests
- make cleanup-packet
after_script: []

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr_short
variables:
RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu20-crio:
extends: .packet_pr_manual

packet_ubuntu22-calico-all-in-one:
extends: .packet_pr

packet_ubuntu22-calico-all-in-one-upgrade:
extends: .packet_pr
variables:
UPGRADE_TEST: graceful

packet_ubuntu24-calico-etcd-datastore:
extends: .packet_pr

packet_almalinux8-crio:
extends: .packet_pr

packet_almalinux8-kube-ovn:
extends: .packet_pr

packet_debian11-calico:
extends: .packet_pr

packet_debian11-macvlan:
extends: .packet_pr

packet_debian12-cilium:
extends: .packet_pr

packet_rockylinux8-calico:
extends: .packet_pr

packet_rockylinux9-cilium:
extends: .packet_pr
variables:
RESET_CHECK: "true"

packet_amazon-linux-2-all-in-one:
extends: .packet_pr

packet_opensuse-docker-cilium:
extends: .packet_pr

packet_ubuntu20-cilium-sep:
extends: .packet_pr

## Extended
packet_debian11-docker:
extends: .packet_pr_extended

packet_debian12-docker:
extends: .packet_pr_extended

packet_debian12-calico:
extends: .packet_pr_extended

packet_almalinux8-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"

packet_rockylinux9-calico:
extends: .packet_pr_extended

packet_almalinux8-calico:
extends: .packet_pr_extended

packet_almalinux8-docker:
extends: .packet_pr_extended

packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr_extended

packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended

packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended

packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended

packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended

# ### MANUAL JOBS
packet_fedora37-crio:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual

packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual

packet_fedora37-calico-swap-selinux:
extends: .packet_pr_manual

packet_almalinux8-calico-ha-ebpf:
extends: .packet_pr_manual

packet_almalinux8-calico-nodelocaldns-secondary:
extends: .packet_pr_manual

packet_debian11-custom-cni:
extends: .packet_pr_manual

packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual

packet_debian12-custom-cni-helm:
extends: .packet_pr_manual

packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual

# PERIODIC
packet_fedora38-docker-calico:
stage: deploy-extended
extends: .packet_periodic
variables:
RESET_CHECK: "true"

packet_fedora37-calico-selinux:
stage: deploy-extended
extends: .packet_periodic


packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: basic


packet_debian11-calico-upgrade-once:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

packet_ubuntu20-calico-ha-recover:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"

packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic

@@ -1,17 +0,0 @@
---
# stub pipeline for dynamic generation
pre-commit:
tags:
- light
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- pre-commit run --all-files
cache:
key: pre-commit-$HOOK_ID
paths:
- /pre-commit-cache
parallel:
matrix:
- HOOK_ID:

@@ -5,28 +5,21 @@
needs:
- ci-not-authorized
- pipeline-image
variables:
TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub"
TF_VAR_ssh_private_key_path: $ANSIBLE_PRIVATE_KEY_FILE
CLUSTER: $CI_COMMIT_REF_NAME
TERRAFORM_STATE_ROOT: $CI_PROJECT_DIR
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
- ln -s contrib/terraform/$PROVIDER/hosts
- ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts
- terraform -chdir="contrib/terraform/$PROVIDER" init
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

.terraform_validate:
terraform_validate:
extends: .terraform_install
tags: [ffci]
only: ['master', /^pr-.*$/]
@@ -36,6 +29,17 @@
stage: test
needs:
- pipeline-image
parallel:
matrix:
- PROVIDER:
- openstack
- equinix
- aws
- exoscale
- hetzner
- vsphere
- upcloud
- nifcloud

.terraform_apply:
extends: .terraform_install
@@ -43,99 +47,24 @@
stage: deploy-extended
when: manual
only: [/^pr-.*$/]
artifacts:
when: always
paths:
- cluster-dump/
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
ANSIBLE_REMOTE_USER: ubuntu # the openstack terraform module does not handle custom user correctly
ANSIBLE_SSH_RETRIES: 15
TF_VAR_ssh_user: $ANSIBLE_REMOTE_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
- ssh-keygen -N '' -f $ANSIBLE_PRIVATE_KEY_FILE -t rsa
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
- terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-equinix:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: equinix
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: exoscale

tf-validate-hetzner:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: hetzner

tf-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-nifcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: nifcloud

# tf-packet-ubuntu20-default:
# extends: .terraform_apply
# variables:
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_metro: am
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_20_04

.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
- terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve

# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
@@ -169,11 +98,8 @@ tf-elastx_ubuntu20-calico:
allow_failure: true
variables:
<<: *elastx_variables
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -194,46 +120,3 @@ tf-elastx_ubuntu20-calico:
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-20.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# environment: ovh
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu20-calico:
# extends: .terraform_apply
# when: on_success
# environment: ovh
# variables:
# <<: *ovh_variables
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 20.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

@@ -1,19 +1,21 @@
---
.vagrant:
extends: .testcases
vagrant:
extends: .job-moderated
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
tags: [ffci-vm-large]
# only: [/^pr-.*$/]
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
image: quay.io/kubespray/vm-kubespray-ci:v13
services: []
before_script:
- echo $USER
@@ -26,38 +28,22 @@
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- vagrant up
- ./tests/scripts/testcases_run.sh

vagrant_ubuntu20-calico-dual-stack:
after_script:
- vagrant destroy -f
cache:
key: $CI_JOB_NAME_SLUG
paths:
- .vagrant.d/boxes
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)

vagrant_ubuntu20-flannel:
stage: deploy-part1
extends: .vagrant
when: on_success
allow_failure: false

vagrant_ubuntu20-flannel-collection:
stage: deploy-extended
extends: .vagrant
when: manual

vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended
extends: .vagrant
when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended
extends: .vagrant
when: manual

vagrant_fedora37-kube-router:
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
parallel:
matrix:
- TESTCASE:
- ubuntu24-calico-dual-stack
- ubuntu24-calico-ipv6only-stack

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v5.0.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
@@ -20,12 +20,6 @@ repos:
- id: yamllint
args: [--strict]

- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)"

- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
hooks:
@@ -35,12 +29,10 @@ repos:
files: "\\.sh$"

- repo: https://github.com/ansible/ansible-lint
rev: v24.5.0
rev: v25.1.1
hooks:
- id: ansible-lint
additional_dependencies:
- ansible==9.8.0
- jsonschema==4.22.0
- jmespath==1.0.1
- netaddr==1.3.0
- distlib
@@ -53,28 +45,6 @@ repos:

- repo: local
hooks:
- id: ansible-syntax-check
name: ansible-syntax-check
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
language: python
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
additional_dependencies:
- ansible==9.5.1

- id: tox-inventory-builder
name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox"
language: python
pass_filenames: false
additional_dependencies:
- tox==4.15.0

- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh
language: script
pass_filenames: false

- id: collection-build-install
name: Build and install kubernetes-sigs.kubespray Ansible collection
language: python
@@ -100,6 +70,14 @@ repos:
- pathlib
- pyaml

- id: check-galaxy-version
name: Verify correct version for galaxy.yml
entry: scripts/galaxy_version.py
language: python
pass_filenames: false
additional_dependencies:
- ruamel.yaml

- id: jinja-syntax-check
name: jinja-syntax-check
entry: tests/scripts/check-templates.py
@@ -108,3 +86,25 @@ repos:
- jinja
additional_dependencies:
- jinja2

- id: propagate-ansible-variables
name: Update static files referencing default kubespray values
language: python
additional_dependencies:
- ansible-core>=2.16.4
entry: scripts/propagate_ansible_variables.yml
pass_filenames: false

- id: check-checksums-sorted
name: Check that our checksums are correctly sorted by version
entry: scripts/assert-sorted-checksums.yml
language: python
pass_filenames: false
additional_dependencies:
- ansible

- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)"

@@ -34,11 +34,9 @@ RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl

COPY *.yml ./

7  Makefile

@@ -1,7 +0,0 @@
mitogen:
	@echo Mitogen support is deprecated.
	@echo Please run the following command manually:
	@echo ansible-playbook -c local mitogen.yml -vv
clean:
	rm -rf dist/
	rm *.retry

174  README.md

@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_providers/openstack.md), [vSphere](docs/cloud_providers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_controllers/openstack.md), [vSphere](docs/cloud_controllers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -15,74 +15,23 @@ You can get your invite [here](http://slack.k8s.io/)

Below are several ways to use Kubespray to deploy a Kubernetes cluster.

### Docker

Ensure you have installed Docker then

```ShellSession
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.27.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Ansible

#### Usage

Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
then run the following steps:

```ShellSession
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# And be mind it will remove the current kubernetes cluster (if it's running)!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control node,
Python packages installed via `sudo pip install -r requirements.txt` will go to
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

This likely indicates that a task depends on a module present in ``requirements.txt``.

One way of addressing this is to uninstall the system Ansible package then
reinstall Ansible via ``pip``, but this not always possible and one must
take care regarding package versions.
A workaround consists of setting the `ANSIBLE_LIBRARY`
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
installation location, which is the ``Location`` shown by running
`pip show [package]` before executing `ansible-playbook`.

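A hedged sketch of that workaround — the `Location` path below is illustrative and will differ per system:

```ShellSession
# Find the pip installation location of Ansible (the "Location:" field).
pip show ansible | grep ^Location
# Suppose it prints: Location: /usr/local/lib/python3.10/dist-packages
export ANSIBLE_LIBRARY=/usr/local/lib/python3.10/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python3.10/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
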
A simple way to ensure you get all the correct version of Ansible is to use
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
git checkout v2.25.0
docker pull quay.io/kubespray/kubespray:v2.25.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.25.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
See [Getting started](/docs/getting_started/getting-started.md)

#### Collection

@@ -123,12 +72,9 @@ vagrant up
- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md)
- [openSUSE setup](docs/operating_systems/opensuse.md)
- [Downloaded artifacts](docs/advanced/downloads.md)
- [Cloud providers](docs/cloud_providers/cloud.md)
- [OpenStack](docs/cloud_providers/openstack.md)
- [AWS](docs/cloud_providers/aws.md)
- [Azure](docs/cloud_providers/azure.md)
- [vSphere](docs/cloud_providers/vsphere.md)
- [Equinix Metal](docs/cloud_providers/equinix-metal.md)
- [OpenStack](docs/cloud_controllers/openstack.md)
- [vSphere](docs/cloud_controllers/vsphere.md)
- [Large deployments](docs/operations/large-deployments.md)
- [Adding/replacing a node](docs/operations/nodes.md)
- [Upgrades basics](docs/operations/upgrades.md)
@@ -143,57 +89,61 @@ vagrant up
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye
- **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Fedora** 37, 38
- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Fedora** 39, 40
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md))

Note: Upstart/SysV init based OS types are not supported.
Note:

- Upstart/SysV init based OS types are not supported.
- [Kernel requirements](docs/operations/kernel-requirements.md) (please read if the OS kernel version is < 4.19).

## Supported Components

<!-- BEGIN ANSIBLE MANAGED BLOCK -->

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.30.4
- [etcd](https://github.com/etcd-io/etcd) v3.5.12
- [docker](https://www.docker.com/) v26.1
- [containerd](https://containerd.io/) v1.7.21
- [cri-o](http://cri-o.io/) v1.30.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
|
||||
- [etcd](https://github.com/etcd-io/etcd) 3.5.16
|
||||
- [docker](https://www.docker.com/) 28.0
|
||||
- [containerd](https://containerd.io/) 2.0.3
|
||||
- [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- Network Plugin
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
|
||||
- [calico](https://github.com/projectcalico/calico) v3.28.1
|
||||
- [cilium](https://github.com/cilium/cilium) v1.15.4
|
||||
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.12.21
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
|
||||
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
|
||||
- [weave](https://github.com/rajch/weave) v2.8.7
|
||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
|
||||
- [calico](https://github.com/projectcalico/calico) 3.29.2
|
||||
- [cilium](https://github.com/cilium/cilium) 1.15.9
|
||||
- [flannel](https://github.com/flannel-io/flannel) 0.22.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
|
||||
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
|
||||
- [weave](https://github.com/rajch/weave) 2.8.7
|
||||
- [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
|
||||
- Application
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.14.7
|
||||
- [coredns](https://github.com/coredns/coredns) v1.11.1
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.11.2
|
||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
|
||||
- [argocd](https://argoproj.github.io/) v2.11.0
|
||||
- [helm](https://helm.sh/) v3.15.4
|
||||
- [metallb](https://metallb.universe.tf/) v0.13.9
|
||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
|
||||
- [coredns](https://github.com/coredns/coredns) 1.11.3
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
|
||||
- [argocd](https://argoproj.github.io/) 2.14.5
|
||||
- [helm](https://helm.sh/) 3.16.4
|
||||
- [metallb](https://metallb.universe.tf/) 0.13.9
|
||||
- [registry](https://github.com/distribution/distribution) 2.8.1
|
||||
- Storage Plugin
|
||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
|
||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
|
||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
|
||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
||||
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.16.4
|
||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0
|
||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0
|
||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0
|
||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2
|
||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
|
||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
|
||||
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4
|
||||
|
||||
<!-- END ANSIBLE MANAGED BLOCK -->
|
||||
|
||||
## Container Runtime Notes
|
||||
|
||||
@@ -201,7 +151,7 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Minimum required version of Kubernetes is v1.28**
|
||||
- **Minimum required version of Kubernetes is v1.30**
|
||||
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
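
For instance, IPv4 forwarding can be enabled ad hoc as below (a minimal sketch; in practice you would persist the setting via `/etc/sysctl.d/`, and Kubespray also manages this sysctl during deployment):

```ShellSession
# Enable IPv4 forwarding immediately on a target server (one-off, not persistent)
sudo sysctl -w net.ipv4.ip_forward=1
```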

@@ -215,10 +165,10 @@ Note: Upstart/SysV init based OS types are not supported.
Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
- Node
  - Memory: 1024 MB
- Control Plane
  - Memory: 2 GB
- Worker Node
  - Memory: 1 GB

## Network Plugins

@@ -12,7 +12,6 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request.
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
1. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
@@ -46,7 +45,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo

* Minor releases can change components' versions, but not the major `kube_version`.
  Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: 3.0.6`,
  then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
  and *any* changes to other components, like etcd v4, or calico 1.2.3.
  And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

@@ -9,7 +9,7 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
mattymo
floryut
oomichi
cristicalin
ant31
VannTen
yankay

Vagrantfile (vendored): 71 changes

@@ -26,11 +26,14 @@ SUPPORTED_OS = {
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "almalinux9" => {box: "almalinux/9", user: "vagrant"},
  "rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
  "rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "fedora39" => {box: "fedora/39-cloud-base", user: "vagrant"},
  "fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
  "fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
  "fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -55,16 +58,27 @@ $subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$network_plugin ||= "flannel"
$inventories ||= []
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# Modify those to have separate groups (for instance, to test separate etcd:)
# first_control_plane = 1
# first_etcd = 4
# control_plane_instances = 3
# etcd_instances = 3
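# With those hypothetical values, the "etcd" group generated further down would
# expand to "#{$instance_name_prefix}-[4:6]", i.e. instances 4 through 6.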
$first_node ||= 1
$first_control_plane ||= 1
$first_etcd ||= 1

# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$kube_master_instances ||= [$num_instances, 2].min
$control_plane_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
$kube_node_instances ||= $num_instances - $first_node + 1

# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
@@ -93,19 +107,6 @@ if ! SUPPORTED_OS.key?($os)
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
@@ -204,7 +205,7 @@ Vagrant.configure("2") do |config|
node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
end

if ["rhel7","rhel8"].include? $os
if ["rhel8"].include? $os
  # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
  # be installed until the host is registered with a valid Red Hat support subscription
  node.vm.synced_folder ".", "/vagrant", disabled: false
@@ -219,14 +220,20 @@ Vagrant.configure("2") do |config|
end

ip = "#{$subnet}.#{i+100}"
ip6 = "#{$subnet_ipv6}::#{i+100}"
node.vm.network :private_network,
  :ip => ip,
  :libvirt__guest_ipv6 => 'yes',
  :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
  :libvirt__ipv6_address => ip6,
  :libvirt__ipv6_prefix => "64",
  :libvirt__forward_mode => "none",
  :libvirt__dhcp_enabled => false

# libvirt__ipv6_address does not work as intended, the address is obtained with the desired prefix, but auto-generated (like fd3c:b398:698:756:5054:ff:fe48:c61e/64)
# add a default route so ansible_default_ipv6 can be detected
# TODO: fix libvirt__ipv6 or use $subnet in shell
config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"

# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"

@@ -235,15 +242,16 @@ Vagrant.configure("2") do |config|
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
end
# Hack for fedora37/38 to get the IP address of the second interface
if ["fedora37", "fedora38"].include? $os
# Hack for fedora39/40 to get the IP address of the second interface
if ["fedora39", "fedora40", "fedora39-arm64", "fedora40-arm64"].include? $os
  config.vm.provision "shell", inline: <<-SHELL
    nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
    nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)/24
    nmcli conn modify 'Wired connection 2' ipv4.method manual
    service NetworkManager restart
  SHELL
end

# Rockylinux boxes need UEFI
if ["rockylinux8", "rockylinux9"].include? $os
  config.vm.provider "libvirt" do |domain|
@@ -252,7 +260,7 @@ Vagrant.configure("2") do |config|
end

# Disable firewalld on oraclelinux/redhat vms
if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
if ["oraclelinux","oraclelinux8", "rhel8","rockylinux8"].include? $os
  node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end

@@ -286,23 +294,22 @@ Vagrant.configure("2") do |config|
ansible.playbook = $playbook
ansible.compatibility_mode = "2.0"
ansible.verbose = $ansible_verbosity
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
if File.exist?($ansible_inventory_path)
  ansible.inventory_path = $ansible_inventory_path
end
ansible.become = true
ansible.limit = "all,localhost"
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
ansible.raw_arguments = ["--forks=#{$num_instances}",
                         "--flush-cache",
                         "-e ansible_become_pass=vagrant"] +
                        $inventories.map {|inv| ["-i", inv]}.flatten
ansible.host_vars = host_vars
ansible.extra_vars = $extra_vars
if $ansible_tags != ""
  ansible.tags = [$ansible_tags]
end
ansible.groups = {
  "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
  "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
  "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
  "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"],
  "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"],
  "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"],
  "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
}
end

@@ -1,177 +0,0 @@
# Kubespray DIND experimental setup

This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers in localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
- dind-cluster: customizes each node container to have required
  system packages installed, and some utils (swapoff, lsattr)
  symlinked to /bin/true to ease mimicking a real node.

This playbook has been tested with Ubuntu 16.04 as host and ubuntu:16.04
as docker images (note that dind-cluster has specific customization
for these images).

The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (wraps up running `contrib/inventory_builder/inventory.py` with
node containers IPs and prefix).

## Deploying

See below for a complete successful run:

1. Create the node containers

```shell
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt

ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
```

NOTE: if the playbook run fails with an error message like the one below,
you may need to specifically set `ansible_python_interpreter`,
see `./hosts` file for an example expanded localhost entry.

```shell
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```

2. Customize kubespray-dind.yaml

Note that there's coupling between the node containers created above
and the `kubespray-dind.yaml` settings, in particular regarding the selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings.

```shell
$EDITOR contrib/dind/kubespray-dind.yaml
```

3. Prepare the inventory and run the playbook

```shell
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as in the command line below,
replacing `DISTRO` with either `debian`, `ubuntu`, `centos`, `fedora`:

```shell
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```

## Resulting deployment

See below to get an idea of what a completed deployment looks like,
from the host where you ran kubespray playbooks.

### node_distro: debian

Running from an Ubuntu Xenial host:

```shell
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

$ docker ps
CONTAINER ID  IMAGE       COMMAND                  CREATED         STATUS         PORTS  NAMES
1835dd183b75  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node5
30b0af8d2924  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node4
3e0d1510c62f  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node3
738993566f94  debian:9.5  "sh -c 'apt-get -qy …"  44 minutes ago  Up 44 minutes         kube-node2
c581ef662ed2  debian:9.5  "sh -c 'apt-get -qy …"  44 minutes ago  Up 44 minutes         kube-node1

$ docker exec kube-node1 kubectl get node
NAME        STATUS  ROLES        AGE  VERSION
kube-node1  Ready   master,node  18m  v1.12.1
kube-node2  Ready   master,node  17m  v1.12.1
kube-node3  Ready   node         17m  v1.12.1
kube-node4  Ready   node         17m  v1.12.1
kube-node5  Ready   node         17m  v1.12.1

$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE    NAME                                   READY  STATUS   RESTARTS  AGE
default      netchecker-agent-67489                 1/1    Running  0         2m51s
default      netchecker-agent-6qq6s                 1/1    Running  0         2m51s
default      netchecker-agent-fsw92                 1/1    Running  0         2m51s
default      netchecker-agent-fw6tl                 1/1    Running  0         2m51s
default      netchecker-agent-hostnet-8f2zb         1/1    Running  0         3m
default      netchecker-agent-hostnet-gq7ml         1/1    Running  0         3m
default      netchecker-agent-hostnet-jfkgv         1/1    Running  0         3m
default      netchecker-agent-hostnet-kwfwx         1/1    Running  0         3m
default      netchecker-agent-hostnet-r46nm         1/1    Running  0         3m
default      netchecker-agent-lxdrn                 1/1    Running  0         2m51s
default      netchecker-server-864bd4c897-9vstl     1/1    Running  0         2m40s
default      sh-68fcc6db45-qf55h                    1/1    Running  1         12m
kube-system  coredns-7598f59475-6vknq               1/1    Running  0         14m
kube-system  coredns-7598f59475-l5q5x               1/1    Running  0         14m
kube-system  kube-apiserver-kube-node1              1/1    Running  0         17m
kube-system  kube-apiserver-kube-node2              1/1    Running  0         18m
kube-system  kube-controller-manager-kube-node1     1/1    Running  0         18m
kube-system  kube-controller-manager-kube-node2     1/1    Running  0         18m
kube-system  kube-proxy-5xx9d                       1/1    Running  0         17m
kube-system  kube-proxy-cdqq4                       1/1    Running  0         17m
kube-system  kube-proxy-n64ls                       1/1    Running  0         17m
kube-system  kube-proxy-pswmj                       1/1    Running  0         18m
kube-system  kube-proxy-x89qw                       1/1    Running  0         18m
kube-system  kube-scheduler-kube-node1              1/1    Running  4         17m
kube-system  kube-scheduler-kube-node2              1/1    Running  4         18m
kube-system  kubernetes-dashboard-5db4d9f45f-548rl  1/1    Running  0         14m
kube-system  nginx-proxy-kube-node3                 1/1    Running  4         17m
kube-system  nginx-proxy-kube-node4                 1/1    Running  4         17m
kube-system  nginx-proxy-kube-node5                 1/1    Running  4         17m
kube-system  weave-net-42bfr                        2/2    Running  0         16m
kube-system  weave-net-6gt8m                        2/2    Running  0         16m
kube-system  weave-net-88nnc                        2/2    Running  0         16m
kube-system  weave-net-shckr                        2/2    Running  0         16m
kube-system  weave-net-xr46t                        2/2    Running  0         16m

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script gives an idea:

```shell
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
```

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI-specific `--extra-vars` combos.
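
A typical invocation over one of those spec files looks like this (a sketch; per the script, outputs land under `./out` unless `OUTPUT_DIR` is overridden):

```shell
cd contrib/dind
./run-test-distros.sh test-some_distros-most_CNIs.env
```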

@@ -1,11 +0,0 @@
---
- name: Create nodes as docker containers
  hosts: localhost
  gather_facts: false
  roles:
    - { role: dind-host }

- name: Customize each node containers
  hosts: containers
  roles:
    - { role: dind-cluster }
@@ -1,3 +0,0 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian
@@ -1,41 +0,0 @@
---
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"
    user: "debian"
    pid1_exe: /lib/systemd/systemd
    init: |
      sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
    raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@*
    ssh_service: ssh
    extra_packages: []
  ubuntu:
    <<: *DEBIAN
    image: "ubuntu:16.04"
    user: "ubuntu"
    init: |
      /sbin/init
  centos: &CENTOS
    image: "centos:8"
    user: "centos"
    pid1_exe: /usr/lib/systemd/systemd
    init: |
      /sbin/init
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@* serial-getty@*
    ssh_service: sshd
    extra_packages: []
  fedora:
    <<: *CENTOS
    image: "fedora:latest"
    user: "fedora"
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
    extra_packages:
      - hostname
      - procps
      - findutils
      - kmod
      - iputils
@@ -1,15 +0,0 @@
[local]
# If you created a virtualenv for ansible, you may need to specify running the
# python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local

[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5

[containers:vars]
ansible_connection=docker
@@ -1,22 +0,0 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true

kubelet_fail_swap_on: false

# Docker nodes need to have been created with same "node_distro: debian"
# at contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian

docker_version: latest

docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker

dns_mode: coredns

deploy_netchecker: true
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0
@@ -1 +0,0 @@
docker
@@ -1,73 +0,0 @@
---
- name: Set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
    distro_extra_packages: "{{ distro_setup['extra_packages'] }}"

- name: Null-ify some linux tools to ease DIND
  file:
    src: "/bin/true"
    dest: "{{ item }}"
    state: link
    force: true
  with_items:
    # DIND box may have swap enabled, don't bother
    - /sbin/swapoff
    # /etc/hosts handling would fail on trying to copy file attributes on edit,
    # void it by successfully returning nil output
    - /usr/bin/lsattr
    # disable selinux-isms, especially needed if running on a non-SELinux host
    - /usr/sbin/semodule

- name: Void installing dpkg docs and man pages on Debian based distros
  copy:
    content: |
      # Delete locales
      path-exclude=/usr/share/locale/*
      # Delete man pages
      path-exclude=/usr/share/man/*
      # Delete docs
      path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
    mode: "0644"
  when:
    - ansible_os_family == 'Debian'

- name: Install system packages to better match a full-fledged node
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"

- name: Start needed services
  service:
    name: "{{ item }}"
    state: started
  with_items:
    - rsyslog
    - "{{ distro_ssh_service }}"

- name: Create distro user "{{ distro_user }}"
  user:
    name: "{{ distro_user }}"
    uid: 1000
    # groups: sudo
    append: true

- name: Allow password-less sudo to "{{ distro_user }}"
  copy:
    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
    dest: "/etc/sudoers.d/{{ distro_user }}"
    mode: "0640"

- name: "Add my pubkey to {{ distro_user }} user authorized keys"
  ansible.posix.authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
@@ -1,87 +0,0 @@
---
- name: Set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
    distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
    distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
    distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  community.docker.docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
    hostname: "{{ item }}"
    command: "{{ distro_init }}"
    # recreate: true
    privileged: true
    tmpfs:
      - /sys/module/nf_conntrack/parameters
    volumes:
      - /boot:/boot
      - /lib/modules:/lib/modules
      - "{{ item }}:/dind/docker"
  register: containers
  with_items: "{{ groups.containers }}"
  tags:
    - addresses

- name: Gather list of containers IPs
  set_fact:
    addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
  tags:
    - addresses

- name: Create inventory_builder helper already set with the list of node containers' IPs
  template:
    src: inventory_builder.sh.j2
    dest: /tmp/kubespray.dind.inventory_builder.sh
    mode: "0755"
  tags:
    - addresses

- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
  raw: |
    # agetty processes churn a lot of cpu time failing on nonexistent ttys; STOP them early, to reap them in the task below
    pkill -STOP agetty || true
    {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
    {{ distro_raw_setup }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("SKIPPED") < 0

- name: Remove gettys from node containers
  raw: |
    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
    systemctl disable {{ distro_agetty_svc }}
    systemctl stop {{ distro_agetty_svc }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
    cmp /etc/machine-id /etc/machine-id~ || true
    systemctl daemon-reload
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("removed") >= 0
@@ -1,3 +0,0 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
@@ -1,93 +0,0 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.

info() {
  local msg="$*"
  local date="$(date -Isec)"
  echo "INFO: [$date] $msg"
}
pass_or_fail() {
  local rc="$?"
  local msg="$*"
  local date="$(date -Isec)"
  [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
  return $rc
}
test_distro() {
  local distro=${1:?};shift
  local extra="${*:-}"
  local prefix="${distro}[${extra}]"
  ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
  pass_or_fail "$prefix: dind-nodes" || return 1
  (cd ../..
    INVENTORY_DIR=inventory/local-dind
    mkdir -p ${INVENTORY_DIR}
    rm -f ${INVENTORY_DIR}/hosts.ini
    CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
    # expand $extra with -e in front of each word
    extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
    ansible-playbook --become -e ansible_ssh_user=$distro -i \
      ${INVENTORY_DIR}/hosts.ini cluster.yml \
      -e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
    pass_or_fail "$prefix: kubespray"
  ) || return 1
  local node0=${NODES[0]}
  docker exec ${node0} kubectl get pod --all-namespaces
  pass_or_fail "$prefix: kube-api" || return 1
  let retries=60
  while ((retries--)); do
    # Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
    # e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
    docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
    sleep 2
  done
  [ $retries -ge 0 ]
  pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube_node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}

# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.

SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
  unset DISTROS EXTRAS
  echo "Loading file=${spec} ..."
  . ${spec} || continue
  : ${DISTROS:?} || continue
  echo "DISTROS:" "${DISTROS[@]}"
  echo "EXTRAS->"
  printf "  %s\n" "${EXTRAS[@]}"
  let n=1
  for distro in "${DISTROS[@]}"; do
    for extra in "${EXTRAS[@]:-NULL}"; do
      # Magic value to let this for run once:
      [[ ${extra} == NULL ]] && unset extra
      docker rm -f "${NODES[@]}"
      printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
      {
        info "${distro}[${extra}] START: file_out=${file_out}"
        time test_distro ${distro} ${extra}
      } |& tee ${file_out}
      # sleeping for the sake of the human to verify if they want
      sleep 2m
    done
  done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
@@ -1,11 +0,0 @@
# Test spec file: used from ./run-test-distros.sh, will run
# each distro in $DISTROS overloading main kubespray ansible-playbook run
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
# DISTROS="${*:-$(egrep -o '^ \w+' group_vars/all/distro.yaml|paste -s)}"
DISTROS=(debian ubuntu centos fedora)

# Each line below will be added as --extra-vars to main playbook run
EXTRAS=(
  'kube_network_plugin=calico'
  'kube_network_plugin=weave'
)
@@ -1,6 +0,0 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
)
@@ -1,8 +0,0 @@
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico {}'
  'kube_network_plugin=canal {}'
  'kube_network_plugin=cilium {}'
  'kube_network_plugin=flannel {}'
  'kube_network_plugin=weave {}'
)
@@ -1,480 +0,0 @@
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: inventory.py ip1 [ip2 ...]
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip:
#   inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Add hosts with a specific hostname, ip, and optional access ip:
#   inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
# YAML file should be in the following format:
#    group1:
#      host1:
#        ip: X.X.X.X
#        var: val
#    group2:
#      host2:
#        ip: X.X.X.X

from collections import OrderedDict
from ipaddress import ip_address
from ruamel.yaml import YAML

import os
import re
import subprocess
import sys

ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
         'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                      'load', 'add']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)


def get_var_as_bool(name, default):
    value = os.environ.get(name, '')
    return _boolean_states.get(value.lower(), default)

# Configurable as shell vars start


CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
# Remove the reference of KUBE_MASTERS after some deprecation cycles.
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
                                        os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)

# Configurable as shell vars end


class KubesprayInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config_file = config_file
        self.yaml_config = {}
        loadPreviousConfig = False
        printHostnames = False
        # See whether there are any commands to process
        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            if changed_hosts[0] == "add":
                loadPreviousConfig = True
                changed_hosts = changed_hosts[1:]
            elif changed_hosts[0] == "print_hostnames":
                loadPreviousConfig = True
                printHostnames = True
            else:
                self.parse_command(changed_hosts[0], changed_hosts[1:])
                sys.exit(0)

        # If the user wants to remove a node, we need to load the config anyway
        if changed_hosts and changed_hosts[0][0] == "-":
            loadPreviousConfig = True

        if self.config_file and loadPreviousConfig:  # Load previous YAML file
            try:
                self.hosts_file = open(config_file, 'r')
                self.yaml_config = yaml.load(self.hosts_file)
            except OSError as e:
                # I am assuming we are catching "cannot open file" exceptions
                print(e)
                sys.exit(1)

        if printHostnames:
            self.print_hostnames()
            sys.exit(0)

        self.ensure_required_groups(ROLES)

        if changed_hosts:
            changed_hosts = self.range2ips(changed_hosts)
            self.hosts = self.build_hostnames(changed_hosts,
                                              loadPreviousConfig)
            self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
            self.set_all(self.hosts)
            self.set_k8s_cluster()
            etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
            self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_kube_control_plane(list(self.hosts.keys())[
                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
            else:
                self.set_kube_control_plane(
                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
        else:  # Show help if no options
            self.show_help()
            sys.exit(0)

        self.write_config(self.config_file)

    def write_config(self, config_file):
        if config_file:
            with open(self.config_file, 'w') as f:
                yaml.dump(self.yaml_config, f)

        else:
            print("WARNING: Unable to save config. Make sure you set "
                  "CONFIG_FILE env var.")

    def debug(self, msg):
        if DEBUG:
            print("DEBUG: {0}".format(msg))

    def get_ip_from_opts(self, optstring):
        if 'ip' in optstring:
            return optstring['ip']
        else:
            raise ValueError("IP parameter not found in options")

    def ensure_required_groups(self, groups):
        for group in groups:
            if group == 'all':
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config:
                    all_dict = OrderedDict([('hosts', OrderedDict({})),
                                            ('children', OrderedDict({}))])
                    self.yaml_config = {'all': all_dict}
            else:
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config['all']['children']:
                    self.yaml_config['all']['children'][group] = {'hosts': {}}

    def get_host_id(self, host):
        '''Returns integer host ID (without padding) from a given hostname.'''
        try:
            short_hostname = host.split('.')[0]
            return int(re.findall("\\d+$", short_hostname)[-1])
        except IndexError:
            raise ValueError("Host name must end in an integer")

    # Keeps already specified hosts,
    # and adds or removes the hosts provided as an argument
    def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
        existing_hosts = OrderedDict()
        highest_host_id = 0
        # Load already existing hosts from the YAML
        if loadPreviousConfig:
            try:
                for host in self.yaml_config['all']['hosts']:
                    # Read configuration of an existing host
                    hostConfig = self.yaml_config['all']['hosts'][host]
                    existing_hosts[host] = hostConfig
                    # If the existing host seems
                    # to have been created automatically, detect its ID
                    if host.startswith(HOST_PREFIX):
                        host_id = self.get_host_id(host)
                        if host_id > highest_host_id:
                            highest_host_id = host_id
            except Exception as e:
                # I am assuming we are catching automatically
                # created hosts without IDs
                print(e)
                sys.exit(1)

        # FIXME(mattymo): Fix condition where delete then add reuses highest id
        next_host_id = highest_host_id + 1
        next_host = ""

        all_hosts = existing_hosts.copy()
        for host in changed_hosts:
            # Delete the host from config if the hostname/IP has a "-" prefix
            if host[0] == "-":
                realhost = host[1:]
                if self.exists_hostname(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    all_hosts.pop(realhost)
                elif self.exists_ip(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    self.delete_host_by_ip(all_hosts, realhost)
            # Host/Argument starts with a digit,
            # then we assume it's an IP address
            elif host[0].isdigit():
                if ',' in host:
                    ip, access_ip = host.split(',')
                else:
                    ip = host
                    access_ip = host
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, ip):
                    self.debug("Skipping existing host {0}.".format(ip))
                    continue

                if USE_REAL_HOSTNAME:
                    cmd = ("ssh -oStrictHostKeyChecking=no "
                           + access_ip + " 'hostname -s'")
                    next_host = subprocess.check_output(cmd, shell=True)
                    next_host = next_host.strip().decode('ascii')
                else:
                    # Generates a hostname because we have only an IP address
                    next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
                    next_host_id += 1
                # Uses automatically generated node name
                # in case we don't provide it.
                all_hosts[next_host] = {'ansible_host': access_ip,
                                        'ip': ip,
                                        'access_ip': access_ip}
            # Host/Argument starts with a letter, then we assume it's a hostname
            elif host[0].isalpha():
                if ',' in host:
                    try:
                        hostname, ip, access_ip = host.split(',')
                    except Exception:
                        hostname, ip = host.split(',')
                        access_ip = ip
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, ip):
                    self.debug("Skipping existing host {0}.".format(ip))
                    continue
                all_hosts[hostname] = {'ansible_host': access_ip,
                                       'ip': ip,
                                       'access_ip': access_ip}
        return all_hosts

    # Expand IP ranges into individual addresses
    def range2ips(self, hosts):
        reworked_hosts = []

        def ips(start_address, end_address):
            try:
                # Python 3.x
                start = int(ip_address(start_address))
                end = int(ip_address(end_address))
            except Exception:
                # Python 2.7
                start = int(ip_address(str(start_address)))
                end = int(ip_address(str(end_address)))
            return [ip_address(ip).exploded for ip in range(start, end + 1)]

        for host in hosts:
            if '-' in host and not (host.startswith('-') or host[0].isalpha()):
                start, end = host.strip().split('-')
                try:
                    reworked_hosts.extend(ips(start, end))
                except ValueError:
                    raise Exception("Range of ip_addresses isn't valid")
            else:
                reworked_hosts.append(host)
        return reworked_hosts

    def exists_hostname(self, existing_hosts, hostname):
        return hostname in existing_hosts.keys()

    def exists_ip(self, existing_hosts, ip):
        for host_opts in existing_hosts.values():
            if ip == self.get_ip_from_opts(host_opts):
                return True
        return False

    def delete_host_by_ip(self, existing_hosts, ip):
        for hostname, host_opts in existing_hosts.items():
            if ip == self.get_ip_from_opts(host_opts):
                del existing_hosts[hostname]
                return
        raise ValueError("Unable to find host by IP: {0}".format(ip))

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.yaml_config['all']['children']:
            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                for host in all_hosts.keys():
                    if host not in hostnames and host not in protected_names:
                        self.debug(
                            "Host {0} removed from role {1}".format(host, role))  # noqa
                        del self.yaml_config['all']['children'][role]['hosts'][host]  # noqa
        # purge from all
        if self.yaml_config['all']['hosts']:
            all_hosts = self.yaml_config['all']['hosts'].copy()
            for host in all_hosts.keys():
                if host not in hostnames and host not in protected_names:
                    self.debug("Host {0} removed from role all".format(host))
                    del self.yaml_config['all']['hosts'][host]

    def add_host_to_group(self, group, host, opts=""):
        self.debug("adding host {0} to group {1}".format(host, group))
        if group == 'all':
            if self.yaml_config['all']['hosts'] is None:
                self.yaml_config['all']['hosts'] = {host: None}
            self.yaml_config['all']['hosts'][host] = opts
        elif group != 'k8s_cluster:children':
            if self.yaml_config['all']['children'][group]['hosts'] is None:
                self.yaml_config['all']['children'][group]['hosts'] = {
                    host: None}
            else:
                self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

    def set_kube_control_plane(self, hosts):
        for host in hosts:
            self.add_host_to_group('kube_control_plane', host)

    def set_all(self, hosts):
        for host, opts in hosts.items():
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
        k8s_cluster = {'children': {'kube_control_plane': None,
                                    'kube_node': None}}
        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

    def set_calico_rr(self, hosts):
        for host in hosts:
            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_control_plane "
                           "group".format(host))
                continue
            if host in self.yaml_config['all']['children']['kube_node']:
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_node group".format(host))
                continue
            self.add_host_to_group('calico_rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in "
                               "kube_control_plane group.".format(host))
                    continue
            self.add_host_to_group('kube_node', host)

    def set_etcd(self, hosts):
        for host in hosts:
            self.add_host_to_group('etcd', host)

    def load_file(self, files=None):
        '''Directly loads JSON to inventory.'''

        if not files:
            raise Exception("No input file specified.")

        import json

        for filename in list(files):
            # Try JSON
            try:
                with open(filename, 'r') as f:
                    data = json.load(f)
            except ValueError:
                raise Exception("Cannot read %s as JSON, or CSV", filename)

            self.ensure_required_groups(ROLES)
            self.set_k8s_cluster()
            for group, hosts in data.items():
                self.ensure_required_groups([group])
                for host, opts in hosts.items():
                    optstring = {'ansible_host': opts['ip'],
                                 'ip': opts['ip'],
                                 'access_ip': opts['ip']}
                    self.add_host_to_group('all', host, optstring)
                    self.add_host_to_group(group, host)
            self.write_config(self.config_file)

    def parse_command(self, command, args=None):
        if command == 'help':
            self.show_help()
        elif command == 'print_cfg':
            self.print_config()
        elif command == 'print_ips':
            self.print_ips()
        elif command == 'print_hostnames':
            self.print_hostnames()
        elif command == 'load':
            self.load_file(args)
        else:
            raise Exception("Invalid command specified.")

    def show_help(self):
        help_text = '''Usage: inventory.py ip1 [ip2 ...]
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5

Available commands:
help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1

Configurable env vars:
DEBUG                    Enable debug printing. Default: True
CONFIG_FILE              File to write config to. Default: ./inventory/sample/hosts.yaml
HOST_PREFIX              Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS       Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD          Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD  Separate K8s control-plane and ETCD if # of nodes >= 200
'''  # noqa
        print(help_text)

    def print_config(self):
        yaml.dump(self.yaml_config, sys.stdout)

    def print_hostnames(self):
        print(' '.join(self.yaml_config['all']['hosts'].keys()))

    def print_ips(self):
        ips = []
        for host, opts in self.yaml_config['all']['hosts'].items():
            ips.append(self.get_ip_from_opts(opts))
|
||||
print(' '.join(ips))
|
||||
|
||||
|
||||
def main(argv=None):
|
||||
if not argv:
|
||||
argv = sys.argv[1:]
|
||||
KubesprayInventory(argv, CONFIG_FILE)
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
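
# Example usage (a hedged sketch; the inventory path is illustrative, while
# CONFIG_FILE and the "add" command are documented in show_help above):
#
#   CONFIG_FILE=inventory/mycluster/hosts.yaml python3 inventory.py \
#       10.10.1.3 10.10.1.4 10.10.1.5
#
#   # Later, append one more node to the same inventory:
#   CONFIG_FILE=inventory/mycluster/hosts.yaml python3 inventory.py add 10.10.1.6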
@@ -1,3 +0,0 @@
configparser>=3.3.0
ipaddress
ruamel.yaml>=0.15.88
@@ -1,3 +0,0 @@
[metadata]
name = kubespray-inventory-builder
version = 0.1
@@ -1,29 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=[],
    pbr=False)
@@ -1,3 +0,0 @@
hacking>=0.10.2
mock>=1.3.0
pytest>=2.8.0
@@ -1,595 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from io import StringIO
import unittest
from unittest import mock

from collections import OrderedDict
import sys

path = "./contrib/inventory_builder/"
if path not in sys.path:
    sys.path.append(path)

import inventory  # noqa


class TestInventoryPrintHostnames(unittest.TestCase):

    @mock.patch('ruamel.yaml.YAML.load')
    def test_print_hostnames(self, load_mock):
        mock_io = mock.mock_open(read_data='')
        load_mock.return_value = OrderedDict({'all': {'hosts': {
            'node1': {'ansible_host': '10.90.0.2',
                      'ip': '10.90.0.2',
                      'access_ip': '10.90.0.2'},
            'node2': {'ansible_host': '10.90.0.3',
                      'ip': '10.90.0.3',
                      'access_ip': '10.90.0.3'}}}})
        with mock.patch('builtins.open', mock_io):
            with self.assertRaises(SystemExit) as cm:
                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                    inventory.KubesprayInventory(
                        changed_hosts=["print_hostnames"],
                        config_file="file")
        self.assertEqual("node1 node2\n", stdout.getvalue())
        self.assertEqual(cm.exception.code, 0)


class TestInventory(unittest.TestCase):
    @mock.patch('inventory.sys')
    def setUp(self, sys_mock):
        sys_mock.exit = mock.Mock()
        super(TestInventory, self).setUp()
        self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
        self.inv = inventory.KubesprayInventory()

    def test_get_ip_from_opts(self):
        optstring = {'ansible_host': '10.90.3.2',
                     'ip': '10.90.3.2',
                     'access_ip': '10.90.3.2'}
        expected = "10.90.3.2"
        result = self.inv.get_ip_from_opts(optstring)
        self.assertEqual(expected, result)

    def test_get_ip_from_opts_invalid(self):
        optstring = "notanaddr=value something random!chars:D"
        self.assertRaisesRegex(ValueError, "IP parameter not found",
                               self.inv.get_ip_from_opts, optstring)

    def test_ensure_required_groups(self):
        groups = ['group1', 'group2']
        self.inv.ensure_required_groups(groups)
        for group in groups:
            self.assertIn(group, self.inv.yaml_config['all']['children'])

    def test_get_host_id(self):
        hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
                     'node3.xyz123.aaa']
        expected = [99, 1, 1, 1, 3]
        for hostname, expected_id in zip(hostnames, expected):
            result = self.inv.get_host_id(hostname)
            self.assertEqual(expected_id, result)

    def test_get_host_id_invalid(self):
        bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
        for hostname in bad_hostnames:
            self.assertRaisesRegex(ValueError, "Host name must end in an",
                                   self.inv.get_host_id, hostname)

    def test_build_hostnames_add_duplicate(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_three(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('node3', {'ansible_host': '10.90.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '10.90.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_first(self):
        changed_hosts = ['-10.90.0.2']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_by_hostname(self):
        changed_hosts = ['-node1']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_exists_hostname_positive(self):
        hostname = 'node1'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_hostname_negative(self):
        hostname = 'node99'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_ip_positive(self):
        ip = '10.90.0.2'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_exists_ip_negative(self):
        ip = '10.90.0.200'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_delete_host_by_ip_positive(self):
        ip = '10.90.0.2'
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.delete_host_by_ip(existing_hosts, ip)
        self.assertEqual(expected, existing_hosts)

    def test_delete_host_by_ip_negative(self):
        ip = '10.90.0.200'
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.assertRaisesRegex(ValueError, "Unable to find host",
                               self.inv.delete_host_by_ip, existing_hosts, ip)

    def test_purge_invalid_hosts(self):
        proper_hostnames = ['node1', 'node2']
        bad_host = 'doesnotbelong2'
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('doesnotbelong2', {'whateveropts=ilike'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        self.inv.purge_invalid_hosts(proper_hostnames)
        self.assertNotIn(
            bad_host, self.inv.yaml_config['all']['hosts'].keys())

    def test_add_host_to_group(self):
        group = 'etcd'
        host = 'node1'
        opts = {'ip': '10.90.0.2'}

        self.inv.add_host_to_group(group, host, opts)
        self.assertEqual(
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)

    def test_set_kube_control_plane(self):
        group = 'kube_control_plane'
        host = 'node1'

        self.inv.set_kube_control_plane([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_all(self):
        hosts = OrderedDict([
            ('node1', 'opt1'),
            ('node2', 'opt2')])

        self.inv.set_all(hosts)
        for host, opt in hosts.items():
            self.assertEqual(
                self.inv.yaml_config['all']['hosts'].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s_cluster'
        expected_hosts = ['kube_node', 'kube_control_plane']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
            self.assertIn(
                host,
                self.inv.yaml_config['all']['children'][group]['children'])

    def test_set_kube_node(self):
        group = 'kube_node'
        host = 'node1'

        self.inv.set_kube_node([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_etcd(self):
        group = 'etcd'
        host = 'node1'

        self.inv.set_etcd([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_scale_scenario_one(self):
        num_nodes = 50
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_scale_scenario_two(self):
        num_nodes = 500
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
        expected = ['10.90.0.2',
                    '10.90.0.4',
                    '10.90.0.5',
                    '10.90.0.6',
                    '10.90.0.8']
        result = self.inv.range2ips(changed_hosts)
        self.assertEqual(expected, result)

    def test_range2ips_incorrect_range(self):
        host_range = ['10.90.0.4-a.9b.c.e']
        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
                               self.inv.range2ips, host_range)

    def test_build_hostnames_create_with_one_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_two_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_three_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2',
                         '10.90.0.3,192.168.0.3',
                         '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node3', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_one_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([('node5',
                                 {'ansible_host': '192.168.0.5',
                                  'ip': '10.90.0.5',
                                  'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_three_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = expected
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_one_existing(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_two_existing(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_three_existing(self):
        changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'}),
            ('node6', {'ansible_host': '192.168.0.6',
                       'ip': '10.90.0.6',
                       'access_ip': '192.168.0.6'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two IP addresses into a config that has
    # three already defined IP addresses. One of the IP addresses
    # is a duplicate.
    def test_build_hostnames_add_two_duplicate_one_overlap(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two duplicate IP addresses into a config that has
    # three already defined IP addresses
    def test_build_hostnames_add_two_duplicate_two_overlap(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)
@@ -1,34 +0,0 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8

[testenv]
allowlist_externals = py.test
usedevelop = True
deps =
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
    http_proxy
    HTTP_PROXY
    https_proxy
    HTTPS_PROXY
    no_proxy
    NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
allowlist_externals = bash
commands =
    bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"

[testenv:venv]
commands = {posargs}

[flake8]
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg
@@ -1,11 +0,0 @@
# Kubespray on KVM Virtual Machines hypervisor preparation

A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kubespray itself.

## User creation

If you want to create a dedicated user for running the Kubespray deployment, specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`, as in the sketch below.
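
A minimal `group_vars` sketch (the key names come from the role's defaults shown next; the user name and key path are the illustrative defaults, not requirements):

```yaml
# Uncommented version of the sample group_vars below.
k8s_deployment_user: kubespray
k8s_deployment_user_pkey_path: /tmp/ssh_rsa
```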
@@ -1,2 +0,0 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
@@ -1,9 +0,0 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
  hosts: localhost
  gather_facts: false
  become: true
  vars:
    bootstrap_os: none
  roles:
    - { role: kvm-setup }
@@ -1,30 +0,0 @@
---

- name: Install required packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - bind-utils
    - ntp
  when: ansible_os_family == "RedHat"

- name: Install required packages
  apt:
    upgrade: true
    update_cache: true
    cache_valid_time: 3600
    name: "{{ item }}"
    state: present
    install_recommends: false
  with_items:
    - dnsutils
    - ntp
  when: ansible_os_family == "Debian"

- name: Create deployment user if required
  include_tasks: user.yml
  when: k8s_deployment_user is defined

- name: Set proper sysctl values
  import_tasks: sysctl.yml
@@ -1,46 +0,0 @@
---
- name: Load br_netfilter module
  community.general.modprobe:
    name: br_netfilter
    state: present
  register: br_netfilter

- name: Add br_netfilter into /etc/modules
  lineinfile:
    dest: /etc/modules
    state: present
    line: 'br_netfilter'
  when: br_netfilter is defined and ansible_os_family == 'Debian'

- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
  copy:
    dest: /etc/modules-load.d/kubespray.conf
    content: |-
      ### This file is managed by Ansible
      br-netfilter
    owner: root
    group: root
    mode: "0644"
  when: br_netfilter is defined

- name: Enable net.ipv4.ip_forward in sysctl
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: true

- name: Set bridge-nf-call-{arptables,iptables} to 0
  ansible.posix.sysctl:
    name: "{{ item }}"
    state: present
    value: 0
    sysctl_file: "{{ sysctl_file_path }}"
    reload: true
  with_items:
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables
    - net.bridge.bridge-nf-call-iptables
  when: br_netfilter is defined
@@ -1,47 +0,0 @@
---
- name: Create user {{ k8s_deployment_user }}
  user:
    name: "{{ k8s_deployment_user }}"
    groups: adm
    shell: /bin/bash

- name: Ensure that .ssh exists
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh"
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
    mode: "0700"

- name: Configure sudo for deployment user
  copy:
    content: |
      %{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
    dest: "/etc/sudoers.d/55-k8s-deployment"
    owner: root
    group: root
    mode: "0644"

- name: Write private SSH key
  copy:
    src: "{{ k8s_deployment_user_pkey_path }}"
    dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
    mode: "0400"
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined

- name: Write public SSH key
  shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
          > /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  args:
    creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  when: k8s_deployment_user_pkey_path is defined

- name: Fix ssh-pub-key permissions
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
    mode: "0600"
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
@@ -1,51 +0,0 @@
---
- name: Check ansible version
  import_playbook: kubernetes_sigs.kubespray.ansible_version

- name: Install mitogen
  hosts: localhost
  strategy: linear
  vars:
    mitogen_version: 0.3.2
    mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
    ansible_connection: local
  tasks:
    - name: Create mitogen plugin dir
      file:
        path: "{{ item }}"
        state: directory
        mode: "0755"
      become: false
      loop:
        - "{{ playbook_dir }}/plugins/mitogen"
        - "{{ playbook_dir }}/dist"

    - name: Download mitogen release
      get_url:
        url: "{{ mitogen_url }}"
        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
        validate_certs: true
        mode: "0644"

    - name: Extract archive
      unarchive:
        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
        dest: "{{ playbook_dir }}/dist/"

    - name: Copy plugin
      ansible.posix.synchronize:
        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
        dest: "{{ playbook_dir }}/plugins/mitogen"

    - name: Add strategy to ansible.cfg
      community.general.ini_file:
        path: ansible.cfg
        mode: "0644"
        section: "{{ item.section | d('defaults') }}"
        option: "{{ item.option }}"
        value: "{{ item.value }}"
      with_items:
        - option: strategy
          value: mitogen_linear
        - option: strategy_plugins
          value: plugins/mitogen/ansible_mitogen/plugins/strategy
@@ -1,92 +0,0 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS

You can either deploy using Ansible on its own by supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained: you don't need to go through one to use the other. If you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.

## Using an Ansible inventory

In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group, as in the sketch below.
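
A minimal sketch of that group layout (host names are illustrative; see `inventory.example` for the full version):

```ini
[gfs-cluster]
gfs_node1
gfs_node2
gfs_node3

[network-storage:children]
gfs-cluster
```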

Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines all run Ubuntu):

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, execute from the same directory:

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines another, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

```shell
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
```

## Using Terraform and Ansible

The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent and set up environment variables for terraform:

```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, from the kubespray directory (root of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to set (for instance, the type of machine for bootstrapping).

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the GlusterFS nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -1,29 +0,0 @@
---
- name: Bootstrap hosts
  hosts: gfs-cluster
  gather_facts: false
  vars:
    ansible_ssh_pipelining: false
  roles:
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  hosts: all
  gather_facts: true

- name: Install glusterfs server
  hosts: gfs-cluster
  vars:
    ansible_ssh_pipelining: true
  roles:
    - { role: glusterfs/server }

- name: Install glusterfs clients
  hosts: k8s_cluster
  roles:
    - { role: glusterfs/client }

- name: Configure Kubernetes to use glusterfs
  hosts: kube_control_plane[0]
  roles:
    - { role: kubernetes-pv }
@@ -1 +0,0 @@
../../../inventory/local/group_vars
@@ -1,43 +0,0 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
#
# ## GlusterFS nodes
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube_control_plane]
# node1
# node2

# [etcd]
# node1
# node2
# node3

# [kube_node]
# node2
# node3
# node4
# node5
# node6

# [k8s_cluster:children]
# kube_node
# kube_control_plane

# [gfs-cluster]
# gfs_node1
# gfs_node2
# gfs_node3

# [network-storage:children]
# gfs-cluster
@@ -1 +0,0 @@
../../../../roles/bootstrap-os
@@ -1,50 +0,0 @@
# Ansible Role: GlusterFS

[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)

Installs and configures GlusterFS on Linux.

## Requirements

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role), for example as sketched below.
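
A hedged sketch of opening those ports with the `geerlingguy.firewall` role (the two variable names are that role's documented defaults; the host group and port list are illustrative, assuming a three-server cluster on GlusterFS 3.4+):

```yaml
- hosts: gluster
  vars:
    firewall_allowed_tcp_ports:
      - "111"
      - "24007"
      - "24008"
      # One brick port per server, starting at 49152 on GlusterFS 3.4+.
      - "49152"
      - "49153"
      - "49154"
    firewall_allowed_udp_ports:
      - "111"
  roles:
    - geerlingguy.firewall
```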

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes; a short sketch follows.
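
For instance, a minimal play that creates and starts a replicated volume (the host group, volume name, brick path, peer IPs, and replica count are illustrative, not part of this role):

```yaml
- hosts: gluster
  become: true
  tasks:
    # Create a replica-2 volume across two peers and ensure it is started.
    - name: Create gluster volume
      gluster.gluster.gluster_volume:
        state: present
        name: test1
        bricks: /bricks/brick1/g1
        replicas: 2
        cluster:
          - 192.0.2.10
          - 192.0.2.11
        host: "{{ inventory_hostname }}"
      run_once: true
```

This mirrors how the `glusterfs/server` role later in this diff drives the same module from its `tasks/main.yml`.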

## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

```yaml
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.5"
```

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

## Dependencies

None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

## License

MIT / BSD

## Author Information

This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -1,11 +0,0 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_version: "4.1"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
@@ -1,30 +0,0 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -1,21 +0,0 @@
---
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -1,24 +0,0 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added.  # noqa no-handler
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS client is installed.
  apt:
    name: "{{ item }}"
    state: present
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-client
@@ -1,14 +0,0 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - glusterfs-client
@@ -1,13 +0,0 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.12"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
disk_volume_device_1: /dev/vdb
@@ -1,30 +0,0 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -1,113 +0,0 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: Install xfs Debian
  apt:
    name: xfsprogs
    state: present
  when: ansible_os_family == "Debian"

- name: Install xfs RedHat
  package:
    name: xfsprogs
    state: present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  community.general.filesystem:
    fstype: xfs
    dev: "{{ disk_volume_device_1 }}"

# Mount external volumes
- name: Mounting new xfs filesystem
  ansible.posix.mount:
    name: "{{ gluster_volume_node_mount_dir }}"
    src: "{{ disk_volume_device_1 }}"
    fstype: xfs
    state: mounted

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service:
    name: "{{ glusterfs_daemon }}"
    state: started
    enabled: true

- name: Ensure Gluster brick and mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume with replicas
  gluster.gluster.gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
  run_once: true
  when: groups['gfs-cluster'] | length > 1

- name: Configure Gluster volume without replicas
  gluster.gluster.gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
  run_once: true
  when: groups['gfs-cluster'] | length <= 1

- name: Mount glusterfs to retrieve disk size
  ansible.posix.mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup:
    filter: ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
  set_fact:
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Create file on GlusterFS
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
    mode: "0644"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  ansible.posix.mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -1,26 +0,0 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added.  # noqa no-handler
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-server
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS is installed.
  apt:
    name: "{{ item }}"
    state: present
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -1,15 +0,0 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -1 +0,0 @@
test file
@@ -1,2 +0,0 @@
---
glusterfs_daemon: glusterd
@@ -1,2 +0,0 @@
---
glusterfs_daemon: glusterd
|
||||
@@ -1,23 +0,0 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
    mode: "0644"
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json }
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml }
    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json }
  register: gluster_pv
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
    name: glusterfs
    namespace: default
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest', 'present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -1,12 +0,0 @@
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "spec": {
    "ports": [
      {"port": 1}
    ]
  }
}
@@ -1,23 +0,0 @@
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "subsets": [
    {% for host in groups['gfs-cluster'] %}
    {
      "addresses": [
        {
          "ip": "{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    }{%- if not loop.last %}, {% endif -%}
    {% endfor %}
  ]
}
@@ -1,14 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
spec:
  capacity:
    storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs
    path: gluster
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
@@ -1,3 +0,0 @@
---
dependencies:
  - {role: kubernetes-pv/ansible, tags: apps}
@@ -1,27 +0,0 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

## Important notice

> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)

## Client Setup

Heketi provides a CLI that lets users administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
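
Once heketi is running, you can sanity-check the deployment from your client machine. A minimal sketch, assuming the service address and the `heketi_admin_key` from your inventory (the URL and key below are placeholders):

```shell
heketi-cli --server http://<heketi-service-ip>:8080 \
  --user admin --secret "<heketi_admin_key>" topology info
```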

## Install

Copy `inventory.yml.sample` over to `inventory/sample/k8s_heketi_inventory.yml` and change it according to your setup.

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```
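
Once provisioning finishes, workloads can request volumes through the resulting storage class (named `gluster` by this playbook's templates). A minimal sketch of a claim, using a hypothetical claim name and size:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-pvc
spec:
  storageClassName: gluster
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
```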

## Tear down

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```

Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system.
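
For example:

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml \
  contrib/network-storage/heketi/heketi-tear-down.yml --extra-vars "heketi_remove_lvm=true"
```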
@@ -1,11 +0,0 @@
---
- name: Tear down heketi
  hosts: kube_control_plane[0]
  roles:
    - { role: tear-down }

- name: Teardown disks in heketi
  hosts: heketi-node
  become: true
  roles:
    - { role: tear-down-disks }
@@ -1,12 +0,0 @@
---
- name: Prepare heketi install
  hosts: heketi-node
  roles:
    - { role: prepare }

- name: Provision heketi
  hosts: kube_control_plane[0]
  tags:
    - "provision"
  roles:
    - { role: provision }
@@ -1,33 +0,0 @@
all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
    glusterfs_daemonset:
      readiness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 3
      liveness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 10
  children:
    k8s_cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
        kube_control_plane:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube_node:
          hosts: &kube_nodes
            node1:
            node2:
            node3:
            node4:
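        # heketi-node reuses the kube_node host list via the YAML anchor defined above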
        heketi-node:
          vars:
            disk_volume_device_1: "/dev/vdb"
          hosts:
            <<: *kube_nodes
@@ -1 +0,0 @@
jmespath
@@ -1,24 +0,0 @@
---
- name: "Load lvm kernel modules"
  become: true
  with_items:
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  community.general.modprobe:
    name: "{{ item }}"
    state: "present"

- name: "Install glusterfs mount utils (RedHat)"
  become: true
  package:
    name: "glusterfs-fuse"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

- name: "Install glusterfs mount utils (Debian)"
  become: true
  apt:
    name: "glusterfs-client"
    state: "present"
  when: "ansible_os_family == 'Debian'"
@@ -1 +0,0 @@
---
@@ -1,3 +0,0 @@
---
- name: "Stop port forwarding"
  command: "killall "
@@ -1,64 +0,0 @@
---
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
  register: "initial_heketi_state"
  changed_when: false
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"

- name: "Bootstrap heketi."
  when:
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
  include_tasks: "bootstrap/deploy.yml"

# Prepare heketi topology
- name: "Get heketi initial pod state."
  register: "initial_heketi_pod"
  command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
  changed_when: false

- name: "Ensure heketi bootstrap pod is up."
  assert:
    that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"

- name: Store the initial heketi pod name
  set_fact:
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"

- name: "Test heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

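# Only load a topology when the cluster does not report any nodes yet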
- name: "Load heketi topology."
|
||||
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||
include_tasks: "bootstrap/topology.yml"
|
||||
|
||||
# Provision heketi database volume
|
||||
- name: "Prepare heketi volumes."
|
||||
include_tasks: "bootstrap/volumes.yml"
|
||||
|
||||
# Remove bootstrap heketi
|
||||
- name: "Tear down bootstrap."
|
||||
include_tasks: "bootstrap/tear-down.yml"
|
||||
|
||||
# Prepare heketi storage
|
||||
- name: "Test heketi storage."
|
||||
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
|
||||
changed_when: false
|
||||
register: "heketi_storage_state"
|
||||
|
||||
# ensure endpoints actually exist before trying to move database data to it
|
||||
- name: "Create heketi storage."
|
||||
include_tasks: "bootstrap/storage.yml"
|
||||
vars:
|
||||
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
|
||||
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
@@ -1,27 +0,0 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
  become: true
  template:
    src: "heketi-bootstrap.json.j2"
    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
    mode: "0640"
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Wait for heketi bootstrap to complete."
  changed_when: false
  register: "initial_heketi_state"
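  # The vars entry below seeds an empty JSON result, presumably so the until expressions always parse cleanly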
  vars:
    initial_heketi_state: { stdout: "{}" }
    pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
  until:
    - "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
    - "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5
@@ -1,33 +0,0 @@
---
- name: "Test heketi storage."
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
- name: "Create heketi storage."
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
    state: "present"
  vars:
    secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
    endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
    service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
  register: "heketi_storage_result"
- name: "Get state of heketi database copy job."
  command: "{{ bin_dir }}/kubectl get jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
  vars:
    heketi_storage_state: { stdout: "{}" }
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
  until:
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
  retries: 60
  delay: 5
@@ -1,14 +0,0 @@
---
- name: "Get existing Heketi deploy resources."
  command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
  register: "heketi_resources"
  changed_when: false
- name: "Delete bootstrap Heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
  when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
  retries: 60
  delay: 5
@@ -1,27 +0,0 @@
---
- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "render"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"
- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa no-handler
  when: "render.changed"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
  register: "load_heketi"
- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
  retries: 60
  delay: 5
@@ -1,41 +0,0 @@
---
- name: "Get heketi volume ids."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
  changed_when: false
  register: "heketi_volumes"
- name: "Get heketi volumes."
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
  with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
  loop_control: { loop_var: "volume_id" }
  register: "volumes_information"
- name: "Test heketi database volume."
  set_fact: { heketi_database_volume_exists: true }
  with_items: "{{ volumes_information.results }}"
  loop_control: { loop_var: "volume_information" }
  vars: { volume: "{{ volume_information.stdout | from_json }}" }
  when: "volume.name == 'heketidbstorage'"
- name: "Provision database volume."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
  when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."
  become: true
  command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
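# Second pass: list the volumes again to confirm that the heketidbstorage volume was actually created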
- name: "Get heketi volume ids."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
|
||||
changed_when: false
|
||||
register: "heketi_volumes"
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_created: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Ensure heketi database volume exists."
|
||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||
@@ -1,4 +0,0 @@
---
- name: "Clean up left over jobs."
  command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
  changed_when: false
@@ -1,44 +0,0 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
  template:
    src: "glusterfs-daemonset.json.j2"
    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    mode: "0644"
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Kubernetes Apps | Label GlusterFS nodes"
  include_tasks: "glusterfs/label.yml"
  with_items: "{{ groups['heketi-node'] }}"
  loop_control:
    loop_var: "node"
- name: "Kubernetes Apps | Wait for daemonset to become available."
  register: "daemonset_state"
  command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
  changed_when: false
  vars:
    daemonset_state: { stdout: "{}" }
    ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
    desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
  until: "ready | int >= 3"
  retries: 60
  delay: 5

- name: "Kubernetes Apps | Lay Down Heketi Service Account"
  template:
    src: "heketi-service-account.json.j2"
    dest: "{{ kube_config_dir }}/heketi-service-account.json"
    mode: "0644"
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-service-account.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,19 +0,0 @@
---
- name: Get storage nodes
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false

- name: "Assign storage label"
  when: "label_present.stdout_lines | length == 0"
  command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"

- name: Get storage nodes again
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false

- name: Ensure the label has been set
  assert:
    that: "label_present.stdout_lines | length > 0"
    msg: "Node {{ node }} has not been assigned the label storagenode=glusterfs."
@@ -1,34 +0,0 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
  become: true
  template:
    src: "heketi-deployment.json.j2"
    dest: "{{ kube_config_dir }}/heketi-deployment.json"
    mode: "0644"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-deployment.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

- name: "Ensure heketi is up and running."
  changed_when: false
  register: "heketi_state"
  vars:
    heketi_state:
      stdout: "{}"
    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
  command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
  until:
    - "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
    - "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5

- name: Set the Heketi pod name
  set_fact:
    heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -1,30 +0,0 @@
---
- name: "Kubernetes Apps | GlusterFS"
  include_tasks: "glusterfs.yml"

- name: "Kubernetes Apps | Heketi Secrets"
  include_tasks: "secret.yml"

- name: "Kubernetes Apps | Test Heketi"
  register: "heketi_service_state"
  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Bootstrap Heketi"
  when: "heketi_service_state.stdout == \"\""
  include_tasks: "bootstrap.yml"

- name: "Kubernetes Apps | Heketi"
  include_tasks: "heketi.yml"

- name: "Kubernetes Apps | Heketi Topology"
  include_tasks: "topology.yml"

- name: "Kubernetes Apps | Heketi Storage"
  include_tasks: "storage.yml"

- name: "Kubernetes Apps | Storage Class"
  include_tasks: "storageclass.yml"

- name: "Clean up"
  include_tasks: "cleanup.yml"
@@ -1,45 +0,0 @@
---
- name: Get clusterrolebindings
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
  when: "clusterrolebinding_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false

- name: Make sure that clusterrolebindings are present now
  assert:
    that: "clusterrolebinding_state.stdout | length > 0"
    msg: "Cluster role binding is not present."

- name: Get the heketi-config-secret secret
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false

- name: "Render Heketi secret configuration."
  become: true
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"
    mode: "0644"

- name: "Deploy Heketi config secret"
  when: "secret_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false

- name: Make sure the heketi-config-secret secret exists now
  assert:
    that: "secret_state.stdout | length > 0"
    msg: "Heketi config secret is not present."
@@ -1,15 +0,0 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Storage"
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  template:
    src: "heketi-storage.json.j2"
    dest: "{{ kube_config_dir }}/heketi-storage.json"
    mode: "0644"
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,26 +0,0 @@
---
- name: "Test storage class."
  command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
  register: "storageclass"
  changed_when: false
- name: "Test heketi service."
  command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
  register: "heketi_service"
  changed_when: false
- name: "Ensure heketi service is available."
  assert: { that: "heketi_service.stdout != \"\"" }
- name: "Render storage class configuration."
  become: true
  vars:
    endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
  template:
    src: "storageclass.yml.j2"
    dest: "{{ kube_config_dir }}/storageclass.yml"
    mode: "0644"
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Storage Class"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/storageclass.yml"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,26 +0,0 @@
---
- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "rendering"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"
- name: "Copy topology configuration into container." # noqa no-handler
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa no-handler
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
  retries: 60
  delay: 5
@@ -1,149 +0,0 @@
{
  "kind": "DaemonSet",
  "apiVersion": "apps/v1",
  "metadata": {
    "name": "glusterfs",
    "labels": {
      "glusterfs": "deployment"
    },
    "annotations": {
      "description": "GlusterFS Daemon Set",
      "tags": "glusterfs"
    }
  },
  "spec": {
    "selector": {
      "matchLabels": {
        "glusterfs-node": "daemonset"
      }
    },
    "template": {
      "metadata": {
        "name": "glusterfs",
        "labels": {
          "glusterfs-node": "daemonset"
        }
      },
      "spec": {
        "nodeSelector": {
          "storagenode": "glusterfs"
        },
        "hostNetwork": true,
        "containers": [
          {
            "image": "gluster/gluster-centos:gluster4u0_centos7",
            "imagePullPolicy": "IfNotPresent",
            "name": "glusterfs",
            "volumeMounts": [
              {
                "name": "glusterfs-heketi",
                "mountPath": "/var/lib/heketi"
              },
              {
                "name": "glusterfs-run",
                "mountPath": "/run"
              },
              {
                "name": "glusterfs-lvm",
                "mountPath": "/run/lvm"
              },
              {
                "name": "glusterfs-etc",
                "mountPath": "/etc/glusterfs"
              },
              {
                "name": "glusterfs-logs",
                "mountPath": "/var/log/glusterfs"
              },
              {
                "name": "glusterfs-config",
                "mountPath": "/var/lib/glusterd"
              },
              {
                "name": "glusterfs-dev",
                "mountPath": "/dev"
              },
              {
                "name": "glusterfs-cgroup",
                "mountPath": "/sys/fs/cgroup"
              }
            ],
            "securityContext": {
              "capabilities": {},
              "privileged": true
            },
            "readinessProbe": {
              "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
              "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            },
            "livenessProbe": {
              "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
              "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            }
          }
        ],
        "volumes": [
          {
            "name": "glusterfs-heketi",
            "hostPath": {
              "path": "/var/lib/heketi"
            }
          },
          {
            "name": "glusterfs-run"
          },
          {
            "name": "glusterfs-lvm",
            "hostPath": {
              "path": "/run/lvm"
            }
          },
          {
            "name": "glusterfs-etc",
            "hostPath": {
              "path": "/etc/glusterfs"
            }
          },
          {
            "name": "glusterfs-logs",
            "hostPath": {
              "path": "/var/log/glusterfs"
            }
          },
          {
            "name": "glusterfs-config",
            "hostPath": {
              "path": "/var/lib/glusterd"
            }
          },
          {
            "name": "glusterfs-dev",
            "hostPath": {
              "path": "/dev"
            }
          },
          {
            "name": "glusterfs-cgroup",
            "hostPath": {
              "path": "/sys/fs/cgroup"
            }
          }
        ]
      }
    }
  }
}
Some files were not shown because too many files have changed in this diff.