Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: master-pat ... ant31-patc (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 6858988883 |  |
6  .github/dependabot.yml  (vendored)

@@ -12,9 +12,3 @@ updates:
      patterns:
        - molecule
        - molecule-plugins*
  - package-ecosystem: "github-actions"
    directory: "/"
    labels:
      - release-note-none
    schedule:
      interval: "weekly"
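For context, a minimal sketch of where the `patterns` key above sits in a full `.github/dependabot.yml`; the `pip` ecosystem and the group name here are illustrative assumptions, not taken from this diff:

```yaml
version: 2
updates:
  - package-ecosystem: "pip"     # assumed ecosystem, for illustration only
    directory: "/"
    schedule:
      interval: "weekly"
    groups:
      molecule:                  # hypothetical group name
        patterns:
          - molecule
          - molecule-plugins*
```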
@@ -1,54 +0,0 @@
name: Upgrade Kubespray components with new patches versions - all branches

on:
  schedule:
    - cron: '22 2 * * *' # every day, 02:22 UTC
  workflow_dispatch:

permissions: {}
jobs:
  get-releases-branches:
    runs-on: ubuntu-latest
    outputs:
      branches: ${{ steps.get-branches.outputs.data }}
    steps:
      - uses: octokit/graphql-action@v2.3.2
        id: get-branches
        with:
          query: |
            query get_release_branches($owner:String!, $name:String!) {
              repository(owner:$owner, name:$name) {
                refs(refPrefix: "refs/heads/",
                     first: 0, # TODO increment once we have release branch with the new checksums format
                     query: "release-",
                     orderBy: {
                       field: ALPHABETICAL,
                       direction: DESC
                     }) {
                  nodes {
                    name
                  }
                }
              }
            }
          variables: |
            owner: ${{ github.repository_owner }}
            name: ${{ github.event.repository.name }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  update-versions:
    needs: get-releases-branches
    strategy:
      fail-fast: false
      matrix:
        branch:
          - name: ${{ github.event.repository.default_branch }}
          - ${{ fromJSON(needs.get-releases-branches.outputs.branches).repository.refs.nodes }}
    uses: ./.github/workflows/upgrade-patch-versions.yml
    permissions:
      contents: write
      pull-requests: write
    name: Update patch updates on ${{ matrix.branch.name }}
    with:
      branch: ${{ matrix.branch.name }}
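The `fromJSON(...)` expression in the matrix expects the GraphQL action's output to be shaped roughly like the following (branch names are illustrative):

```json
{
  "repository": {
    "refs": {
      "nodes": [
        { "name": "release-2.26" },
        { "name": "release-2.25" }
      ]
    }
  }
}
```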
44  .github/workflows/upgrade-patch-versions.yml  (vendored)

@@ -1,44 +0,0 @@
on:
  workflow_call:
    inputs:
      branch:
        description: Which branch to update with new patch versions
        default: master
        required: true
        type: string

jobs:
  update-patch-versions:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
      - uses: actions/setup-python@v5
        with:
          python-version: '3.13'
          cache: 'pip'
      - run: pip install scripts/component_hash_update pre-commit
      - run: update-hashes
        env:
          API_KEY: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/cache@v4
        with:
          key: pre-commit-hook-propagate
          path: |
            ~/.cache/pre-commit
      - run: pre-commit run --all-files propagate-ansible-variables
        continue-on-error: true
      - uses: peter-evans/create-pull-request@v7
        with:
          commit-message: Patch versions updates
          title: Patch versions updates - ${{ inputs.branch }}
          labels: bot
          branch: ${{ inputs.branch }}-patch-updates
          sign-commits: true
          body: |
            /kind feature

            ```release-note
            NONE
            ```
@@ -8,11 +8,11 @@ stages:
variables:
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  GIT_CONFIG_COUNT: 2
  GIT_CONFIG_KEY_0: user.email
  GIT_CONFIG_COUNT: 1
  GIT_CONFIG_KEY_0: user.key
  GIT_CONFIG_VALUE_0: "ci@kubespray.io"
  GIT_CONFIG_KEY_1: user.name
  GIT_CONFIG_VALUE_1: "Kubespray CI"
  GIT_CONFIG_VALUE_1: "CI"
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  GS_ACCESS_KEY_ID: $GS_KEY
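The `GIT_CONFIG_COUNT` / `GIT_CONFIG_KEY_n` / `GIT_CONFIG_VALUE_n` triplets above use git's environment-based configuration (available since git 2.31). A minimal sketch of how git consumes them, outside of GitLab CI:

```shell
# git reads GIT_CONFIG_COUNT numbered key/value pairs from the environment,
# as if they were set in a config file.
export GIT_CONFIG_COUNT=2
export GIT_CONFIG_KEY_0=user.email GIT_CONFIG_VALUE_0="ci@kubespray.io"
export GIT_CONFIG_KEY_1=user.name  GIT_CONFIG_VALUE_1="Kubespray CI"
git config user.email   # prints ci@kubespray.io
```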
@@ -122,7 +122,7 @@ packet_amazon-linux-2-all-in-one:
  - when: manual
    allow_failure: true

packet_opensuse15-6-calico:
packet_opensuse-docker-cilium:
  extends: .packet_pr

packet_ubuntu20-cilium-sep:

@@ -159,9 +159,6 @@ packet_almalinux9-calico:
packet_almalinux9-docker:
  extends: .packet_pr_extended

packet_opensuse15-6-docker-cilium:
  extends: .packet_pr_extended

packet_ubuntu24-calico-all-in-one:
  extends: .packet_pr_extended
11  README.md

@@ -1,3 +1,4 @@
test
# Deploy a Production Ready Kubernetes Cluster

![]()
@@ -113,23 +114,23 @@ Note:
- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
  - [etcd](https://github.com/etcd-io/etcd) 3.5.16
  - [docker](https://www.docker.com/) 28.0
  - [containerd](https://containerd.io/) 2.0.4
  - [docker](https://www.docker.com/) 26.1
  - [containerd](https://containerd.io/) 2.0.3
  - [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
  - [calico](https://github.com/projectcalico/calico) 3.29.3
  - [calico](https://github.com/projectcalico/calico) 3.29.2
  - [cilium](https://github.com/cilium/cilium) 1.15.9
  - [flannel](https://github.com/flannel-io/flannel) 0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
  - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
  - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.0.0
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
  - [weave](https://github.com/rajch/weave) 2.8.7
  - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
  - [coredns](https://github.com/coredns/coredns) 1.11.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.0
  - [argocd](https://argoproj.github.io/) 2.14.5
  - [helm](https://helm.sh/) 3.16.4
  - [metallb](https://metallb.universe.tf/) 0.13.9
@@ -45,7 +45,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follows:

* Minor releases can change components' versions, but not the major `kube_version`.
  Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: 3.0.6`,
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
  then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1,
  and *any* changes to other components, like etcd v4, or calico 1.2.3.
  And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
2  Vagrantfile  (vendored)

@@ -33,7 +33,7 @@ SUPPORTED_OS = {
  "fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
  "fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
  "fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
92  contrib/network-storage/glusterfs/README.md  (new file)

@@ -0,0 +1,92 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS

You can either deploy using Ansible on its own, supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.

## Using an Ansible inventory

In the same directory as this README you should find a file named `inventory.example`, which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.

Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines another, you can instead set the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

```shell
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
```

## Using Terraform and Ansible

The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent, and set up environment variables for terraform:

```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, from the kubespray directory (root of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the Ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any Ansible variable that you want to set up (for instance, the type of machine for bootstrapping).

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the GlusterFS nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following Ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
29  contrib/network-storage/glusterfs/glusterfs.yml  (new file)

@@ -0,0 +1,29 @@
---
- name: Bootstrap hosts
  hosts: gfs-cluster
  gather_facts: false
  vars:
    ansible_ssh_pipelining: false
  roles:
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  hosts: all
  gather_facts: true

- name: Install glusterfs server
  hosts: gfs-cluster
  vars:
    ansible_ssh_pipelining: true
  roles:
    - { role: glusterfs/server }

- name: Install glusterfs clients
  hosts: k8s_cluster
  roles:
    - { role: glusterfs/client }

- name: Configure Kubernetes to use glusterfs
  hosts: kube_control_plane[0]
  roles:
    - { role: kubernetes-pv }
1  contrib/network-storage/glusterfs/group_vars  (symbolic link)

@@ -0,0 +1 @@
../../../inventory/local/group_vars
43  contrib/network-storage/glusterfs/inventory.example  (new file)

@@ -0,0 +1,43 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
#
# ## GlusterFS nodes
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube_control_plane]
# node1
# node2

# [etcd]
# node1
# node2
# node3

# [kube_node]
# node2
# node3
# node4
# node5
# node6

# [k8s_cluster:children]
# kube_node
# kube_control_plane

# [gfs-cluster]
# gfs_node1
# gfs_node2
# gfs_node3

# [network-storage:children]
# gfs-cluster
1  contrib/network-storage/glusterfs/roles/bootstrap-os  (symbolic link)

@@ -0,0 +1 @@
../../../../roles/bootstrap-os
50  contrib/network-storage/glusterfs/roles/glusterfs/README.md  (new file)

@@ -0,0 +1,50 @@
# Ansible Role: GlusterFS

[](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)

Installs and configures GlusterFS on Linux.

## Requirements

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
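As a minimal sketch (not part of this role), creating and starting a replicated volume from your own playbook might look like the following; the volume name and brick path are illustrative assumptions:

```yaml
- name: Create and start a replicated Gluster volume
  gluster.gluster.gluster_volume:
    state: started
    name: example-volume                          # hypothetical volume name
    bricks: /srv/gluster/brick                    # hypothetical brick directory
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{{ groups['gfs-cluster'] }}"        # peers to probe into the pool
  run_once: true
```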
## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

```yaml
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.5"
```

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

## Dependencies

None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

## License

MIT / BSD

## Author Information

This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -0,0 +1,11 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_version: "4.1"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,21 @@
---
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -0,0 +1,24 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS client is installed.
  apt:
    name: "{{ item }}"
    state: present
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-client
@@ -0,0 +1,14 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - glusterfs-client
@@ -0,0 +1,13 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.12"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
disk_volume_device_1: /dev/vdb
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,113 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: Install xfs Debian
  apt:
    name: xfsprogs
    state: present
  when: ansible_os_family == "Debian"

- name: Install xfs RedHat
  package:
    name: xfsprogs
    state: present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  community.general.filesystem:
    fstype: xfs
    dev: "{{ disk_volume_device_1 }}"

# Mount external volumes
- name: Mounting new xfs filesystem
  ansible.posix.mount:
    name: "{{ gluster_volume_node_mount_dir }}"
    src: "{{ disk_volume_device_1 }}"
    fstype: xfs
    state: mounted

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service:
    name: "{{ glusterfs_daemon }}"
    state: started
    enabled: true

- name: Ensure Gluster brick and mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume with replicas
  gluster.gluster.gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
  run_once: true
  when: groups['gfs-cluster'] | length > 1

- name: Configure Gluster volume without replicas
  gluster.gluster.gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
  run_once: true
  when: groups['gfs-cluster'] | length <= 1

- name: Mount glusterfs to retrieve disk size
  ansible.posix.mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup:
    filter: ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
  set_fact:
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Create file on GlusterFS
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
    mode: "0644"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  ansible.posix.mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -0,0 +1,26 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-server
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS is installed.
  apt:
    name: "{{ item }}"
    state: present
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1,15 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1 @@
test file

@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd

@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd
@@ -0,0 +1,23 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
    mode: "0644"
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json }
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml }
    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json }
  register: gluster_pv
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
    name: glusterfs
    namespace: default
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest', 'present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -0,0 +1,12 @@
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "spec": {
    "ports": [
      {"port": 1}
    ]
  }
}
@@ -0,0 +1,23 @@
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "subsets": [
    {% for host in groups['gfs-cluster'] %}
    {
      "addresses": [
        {
          "ip": "{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    }{%- if not loop.last %}, {% endif -%}
    {% endfor %}
  ]
}
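For a two-node `gfs-cluster`, the template above would render roughly as follows (IP addresses are illustrative):

```json
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": { "name": "glusterfs" },
  "subsets": [
    { "addresses": [{ "ip": "10.3.0.7" }], "ports": [{ "port": 1 }] },
    { "addresses": [{ "ip": "10.3.0.8" }], "ports": [{ "port": 1 }] }
  ]
}
```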
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
spec:
  capacity:
    storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs
    path: gluster
    readOnly: false
  persistentVolumeReclaimPolicy: Retain

@@ -0,0 +1,3 @@
---
dependencies:
  - { role: kubernetes-pv/ansible, tags: apps }
27  contrib/network-storage/heketi/README.md  (new file)

@@ -0,0 +1,27 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

## Important notice

> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)

## Client Setup

Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
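Once installed, a quick sanity check against the deployed Heketi service might look like this; the server URL is an illustrative assumption, and the admin key comes from the inventory shown further below:

```shell
heketi-cli --server http://heketi.example.local:8080 \
           --user admin --secret "${HEKETI_ADMIN_KEY}" cluster list
```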
## Install

Copy `inventory.yml.sample` over to `inventory/sample/k8s_heketi_inventory.yml` and change it according to your setup.

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```

Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system.
11  contrib/network-storage/heketi/heketi-tear-down.yml  (new file)

@@ -0,0 +1,11 @@
---
- name: Tear down heketi
  hosts: kube_control_plane[0]
  roles:
    - { role: tear-down }

- name: Teardown disks in heketi
  hosts: heketi-node
  become: true
  roles:
    - { role: tear-down-disks }
12  contrib/network-storage/heketi/heketi.yml  (new file)

@@ -0,0 +1,12 @@
---
- name: Prepare heketi install
  hosts: heketi-node
  roles:
    - { role: prepare }

- name: Provision heketi
  hosts: kube_control_plane[0]
  tags:
    - "provision"
  roles:
    - { role: provision }
33  contrib/network-storage/heketi/inventory.yml.sample  (new file)

@@ -0,0 +1,33 @@
all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
    glusterfs_daemonset:
      readiness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 3
      liveness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 10
  children:
    k8s_cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
        kube_control_plane:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube_node:
          hosts: &kube_nodes
            node1:
            node2:
            node3:
            node4:
        heketi-node:
          vars:
            disk_volume_device_1: "/dev/vdb"
          hosts:
            <<: *kube_nodes
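Note the `&kube_nodes` anchor and the `<<: *kube_nodes` merge key: the `heketi-node` group reuses the exact host mapping of `kube_node`, so every Kubernetes node also becomes a Heketi storage node. A stripped-down illustration of the same mechanism:

```yaml
kube_node:
  hosts: &kube_nodes     # define the anchor on the host mapping
    node1:
    node2:
heketi-node:
  hosts:
    <<: *kube_nodes      # merge the same hosts into this group
```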
1  contrib/network-storage/heketi/requirements.txt  (new file)

@@ -0,0 +1 @@
jmespath
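The `jmespath` package is required on the Ansible control host because the provision role below leans heavily on the `json_query` filter. A minimal sketch of how that filter behaves; the data here is illustrative:

```yaml
- name: Pick service names out of kubectl JSON output
  debug:
    msg: "{{ example_state | json_query(\"items[?kind=='Service'].metadata.name\") }}"
  vars:
    example_state:
      items:
        - { kind: "Service", metadata: { name: "heketi" } }
        - { kind: "Pod", metadata: { name: "heketi-abc12" } }
```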
24  contrib/network-storage/heketi/roles/prepare/tasks/main.yml  (new file)

@@ -0,0 +1,24 @@
---
- name: "Load lvm kernel modules"
  become: true
  with_items:
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  community.general.modprobe:
    name: "{{ item }}"
    state: "present"

- name: "Install glusterfs mount utils (RedHat)"
  become: true
  package:
    name: "glusterfs-fuse"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

- name: "Install glusterfs mount utils (Debian)"
  become: true
  apt:
    name: "glusterfs-client"
    state: "present"
  when: "ansible_os_family == 'Debian'"
@@ -0,0 +1 @@
---

@@ -0,0 +1,3 @@
---
- name: "Stop port forwarding"
  command: "killall "
@@ -0,0 +1,64 @@
---
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
  register: "initial_heketi_state"
  changed_when: false
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"

- name: "Bootstrap heketi."
  when:
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
  include_tasks: "bootstrap/deploy.yml"

# Prepare heketi topology
- name: "Get heketi initial pod state."
  register: "initial_heketi_pod"
  command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
  changed_when: false

- name: "Ensure heketi bootstrap pod is up."
  assert:
    that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"

- name: Store the initial heketi pod name
  set_fact:
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"

- name: "Test heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

- name: "Load heketi topology."
  when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
  include_tasks: "bootstrap/topology.yml"

# Provision heketi database volume
- name: "Prepare heketi volumes."
  include_tasks: "bootstrap/volumes.yml"

# Remove bootstrap heketi
- name: "Tear down bootstrap."
  include_tasks: "bootstrap/tear-down.yml"

# Prepare heketi storage
- name: "Test heketi storage."
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"

# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
  include_tasks: "bootstrap/storage.yml"
  vars:
    secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
    endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
    service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
@@ -0,0 +1,27 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
  become: true
  template:
    src: "heketi-bootstrap.json.j2"
    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
    mode: "0640"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

- name: "Wait for heketi bootstrap to complete."
  changed_when: false
  register: "initial_heketi_state"
  vars:
    initial_heketi_state: { stdout: "{}" }
    pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
  until:
    - "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
    - "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5
@@ -0,0 +1,33 @@
---
- name: "Test heketi storage."
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"

- name: "Create heketi storage."
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
    state: "present"
  vars:
    secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
    endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
    service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
  register: "heketi_storage_result"

- name: "Get state of heketi database copy job."
  command: "{{ bin_dir }}/kubectl get jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
  vars:
    heketi_storage_state: { stdout: "{}" }
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
  until:
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
  retries: 60
  delay: 5
@@ -0,0 +1,14 @@
---
- name: "Get existing Heketi deploy resources."
  command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
  register: "heketi_resources"
  changed_when: false

- name: "Delete bootstrap Heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
  when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"

- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
  retries: 60
  delay: 5
@@ -0,0 +1,27 @@
---
- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "render"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"

- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

- name: "Load heketi topology." # noqa no-handler
  when: "render.changed"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
  register: "load_heketi"

- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
  retries: 60
  delay: 5
@@ -0,0 +1,41 @@
---
- name: "Get heketi volume ids."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
  changed_when: false
  register: "heketi_volumes"

- name: "Get heketi volumes."
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
  with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
  loop_control: { loop_var: "volume_id" }
  register: "volumes_information"

- name: "Test heketi database volume."
  set_fact: { heketi_database_volume_exists: true }
  with_items: "{{ volumes_information.results }}"
  loop_control: { loop_var: "volume_information" }
  vars: { volume: "{{ volume_information.stdout | from_json }}" }
  when: "volume.name == 'heketidbstorage'"

- name: "Provision database volume."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
  when: "heketi_database_volume_exists is undefined"

- name: "Copy configuration from pod."
  become: true
  command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"

- name: "Get heketi volume ids."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
  changed_when: false
  register: "heketi_volumes"

- name: "Get heketi volumes."
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
  with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
  loop_control: { loop_var: "volume_id" }
  register: "volumes_information"

- name: "Test heketi database volume."
  set_fact: { heketi_database_volume_created: true }
  with_items: "{{ volumes_information.results }}"
  loop_control: { loop_var: "volume_information" }
  vars: { volume: "{{ volume_information.stdout | from_json }}" }
  when: "volume.name == 'heketidbstorage'"

- name: "Ensure heketi database volume exists."
  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -0,0 +1,4 @@
---
- name: "Clean up left over jobs."
  command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
  changed_when: false
@@ -0,0 +1,44 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
  template:
    src: "glusterfs-daemonset.json.j2"
    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    mode: "0644"
  become: true
  register: "rendering"

- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

- name: "Kubernetes Apps | Label GlusterFS nodes"
  include_tasks: "glusterfs/label.yml"
  with_items: "{{ groups['heketi-node'] }}"
  loop_control:
    loop_var: "node"

- name: "Kubernetes Apps | Wait for daemonset to become available."
  register: "daemonset_state"
  command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
  changed_when: false
  vars:
    daemonset_state: { stdout: "{}" }
    ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
    desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
  until: "ready | int >= 3"
  retries: 60
  delay: 5

- name: "Kubernetes Apps | Lay Down Heketi Service Account"
  template:
    src: "heketi-service-account.json.j2"
    dest: "{{ kube_config_dir }}/heketi-service-account.json"
    mode: "0644"
  become: true
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi Service Account"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-service-account.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,19 @@
---
- name: Get storage nodes
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false

- name: "Assign storage label"
  when: "label_present.stdout_lines | length == 0"
  command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"

- name: Get storage nodes again
  register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false

- name: Ensure the label has been set
  assert:
    that: "label_present.stdout_lines | length > 0"
    msg: "Node {{ node }} has not been assigned the label storagenode=glusterfs."
@@ -0,0 +1,34 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
  become: true
  template:
    src: "heketi-deployment.json.j2"
    dest: "{{ kube_config_dir }}/heketi-deployment.json"
    mode: "0644"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-deployment.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"

- name: "Ensure heketi is up and running."
  changed_when: false
  register: "heketi_state"
  vars:
    heketi_state:
      stdout: "{}"
    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
  command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
  until:
    - "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
    - "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5

- name: Set the Heketi pod name
  set_fact:
    heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -0,0 +1,30 @@
---
- name: "Kubernetes Apps | GlusterFS"
  include_tasks: "glusterfs.yml"

- name: "Kubernetes Apps | Heketi Secrets"
  include_tasks: "secret.yml"

- name: "Kubernetes Apps | Test Heketi"
  register: "heketi_service_state"
  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Bootstrap Heketi"
  when: "heketi_service_state.stdout == \"\""
  include_tasks: "bootstrap.yml"

- name: "Kubernetes Apps | Heketi"
  include_tasks: "heketi.yml"

- name: "Kubernetes Apps | Heketi Topology"
  include_tasks: "topology.yml"

- name: "Kubernetes Apps | Heketi Storage"
  include_tasks: "storage.yml"

- name: "Kubernetes Apps | Storage Class"
  include_tasks: "storageclass.yml"

- name: "Clean up"
  include_tasks: "cleanup.yml"
@@ -0,0 +1,45 @@
---
- name: Get clusterrolebindings
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
  when: "clusterrolebinding_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
  register: "clusterrolebinding_state"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false

- name: Make sure that clusterrolebindings are present now
  assert:
    that: "clusterrolebinding_state.stdout | length > 0"
    msg: "Cluster role binding is not present."

- name: Get the heketi-config-secret secret
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false

- name: "Render Heketi secret configuration."
  become: true
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"
    mode: "0644"

- name: "Deploy Heketi config secret"
  when: "secret_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
  register: "secret_state"
  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false

- name: Make sure the heketi-config-secret secret exists now
  assert:
    that: "secret_state.stdout | length > 0"
    msg: "Heketi config secret is not present."
@@ -0,0 +1,15 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Storage"
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  template:
    src: "heketi-storage.json.j2"
    dest: "{{ kube_config_dir }}/heketi-storage.json"
    mode: "0644"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,26 @@
---
- name: "Test storage class."
  command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
  register: "storageclass"
  changed_when: false

- name: "Test heketi service."
  command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
  register: "heketi_service"
  changed_when: false

- name: "Ensure heketi service is available."
  assert: { that: "heketi_service.stdout != \"\"" }

- name: "Render storage class configuration."
  become: true
  vars:
    endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
  template:
    src: "storageclass.yml.j2"
    dest: "{{ kube_config_dir }}/storageclass.yml"
    mode: "0644"
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Storage Class"
  kube:
    name: "GlusterFS"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/storageclass.yml"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,26 @@
---
- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "rendering"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"

- name: "Copy topology configuration into container." # noqa no-handler
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

- name: "Load heketi topology." # noqa no-handler
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"

- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
  retries: 60
  delay: 5
@@ -0,0 +1,149 @@
{
  "kind": "DaemonSet",
  "apiVersion": "apps/v1",
  "metadata": {
    "name": "glusterfs",
    "labels": {
      "glusterfs": "deployment"
    },
    "annotations": {
      "description": "GlusterFS Daemon Set",
      "tags": "glusterfs"
    }
  },
  "spec": {
    "selector": {
      "matchLabels": {
        "glusterfs-node": "daemonset"
      }
    },
    "template": {
      "metadata": {
        "name": "glusterfs",
        "labels": {
          "glusterfs-node": "daemonset"
        }
      },
      "spec": {
        "nodeSelector": {
          "storagenode": "glusterfs"
        },
        "hostNetwork": true,
        "containers": [
          {
            "image": "gluster/gluster-centos:gluster4u0_centos7",
            "imagePullPolicy": "IfNotPresent",
            "name": "glusterfs",
            "volumeMounts": [
              {
                "name": "glusterfs-heketi",
                "mountPath": "/var/lib/heketi"
              },
              {
                "name": "glusterfs-run",
                "mountPath": "/run"
              },
              {
                "name": "glusterfs-lvm",
                "mountPath": "/run/lvm"
              },
              {
                "name": "glusterfs-etc",
                "mountPath": "/etc/glusterfs"
              },
              {
                "name": "glusterfs-logs",
                "mountPath": "/var/log/glusterfs"
              },
              {
                "name": "glusterfs-config",
                "mountPath": "/var/lib/glusterd"
              },
              {
                "name": "glusterfs-dev",
                "mountPath": "/dev"
              },
              {
                "name": "glusterfs-cgroup",
                "mountPath": "/sys/fs/cgroup"
              }
            ],
            "securityContext": {
              "capabilities": {},
              "privileged": true
            },
            "readinessProbe": {
              "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
              "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            },
            "livenessProbe": {
              "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
              "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            }
          }
        ],
        "volumes": [
          {
            "name": "glusterfs-heketi",
            "hostPath": {
              "path": "/var/lib/heketi"
            }
          },
          {
            "name": "glusterfs-run"
          },
          {
            "name": "glusterfs-lvm",
            "hostPath": {
              "path": "/run/lvm"
            }
          },
          {
            "name": "glusterfs-etc",
            "hostPath": {
              "path": "/etc/glusterfs"
            }
          },
          {
            "name": "glusterfs-logs",
            "hostPath": {
              "path": "/var/log/glusterfs"
            }
          },
          {
            "name": "glusterfs-config",
            "hostPath": {
              "path": "/var/lib/glusterd"
            }
          },
          {
            "name": "glusterfs-dev",
            "hostPath": {
              "path": "/dev"
            }
          },
          {
            "name": "glusterfs-cgroup",
            "hostPath": {
              "path": "/sys/fs/cgroup"
            }
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,138 @@
{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "deploy-heketi",
        "labels": {
          "glusterfs": "heketi-service",
          "deploy-heketi": "support"
        },
        "annotations": {
          "description": "Exposes Heketi Service"
        }
      },
      "spec": {
        "selector": {
          "name": "deploy-heketi"
        },
        "ports": [
          {
            "name": "deploy-heketi",
            "port": 8080,
            "targetPort": 8080
          }
        ]
      }
    },
    {
      "kind": "Deployment",
      "apiVersion": "apps/v1",
      "metadata": {
        "name": "deploy-heketi",
        "labels": {
          "glusterfs": "heketi-deployment",
          "deploy-heketi": "deployment"
        },
        "annotations": {
          "description": "Defines how to deploy Heketi"
        }
      },
      "spec": {
        "selector": {
          "matchLabels": {
            "name": "deploy-heketi"
          }
        },
        "replicas": 1,
        "template": {
          "metadata": {
            "name": "deploy-heketi",
            "labels": {
              "name": "deploy-heketi",
              "glusterfs": "heketi-pod",
              "deploy-heketi": "pod"
            }
          },
          "spec": {
            "serviceAccountName": "heketi-service-account",
            "containers": [
              {
                "image": "heketi/heketi:9",
                "imagePullPolicy": "Always",
                "name": "deploy-heketi",
                "env": [
                  {
                    "name": "HEKETI_EXECUTOR",
                    "value": "kubernetes"
                  },
                  {
                    "name": "HEKETI_DB_PATH",
                    "value": "/var/lib/heketi/heketi.db"
                  },
                  {
                    "name": "HEKETI_FSTAB",
                    "value": "/var/lib/heketi/fstab"
                  },
                  {
                    "name": "HEKETI_SNAPSHOT_LIMIT",
                    "value": "14"
                  },
                  {
                    "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
                    "value": "y"
                  }
                ],
                "ports": [
                  {
                    "containerPort": 8080
                  }
                ],
                "volumeMounts": [
                  {
                    "name": "db",
                    "mountPath": "/var/lib/heketi"
                  },
                  {
                    "name": "config",
                    "mountPath": "/etc/heketi"
                  }
                ],
                "readinessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 3,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                },
                "livenessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 10,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                }
              }
            ],
            "volumes": [
              {
                "name": "db"
              },
              {
                "name": "config",
                "secret": {
                  "secretName": "heketi-config-secret"
                }
              }
            ]
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,164 @@
{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "Secret",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-db-backup",
        "labels": {
          "glusterfs": "heketi-db",
          "heketi": "db"
        }
      },
      "data": {
      },
      "type": "Opaque"
    },
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi",
        "labels": {
          "glusterfs": "heketi-service",
          "deploy-heketi": "support"
        },
        "annotations": {
          "description": "Exposes Heketi Service"
        }
      },
      "spec": {
        "selector": {
          "name": "heketi"
        },
        "ports": [
          {
            "name": "heketi",
            "port": 8080,
            "targetPort": 8080
          }
        ]
      }
    },
    {
      "kind": "Deployment",
      "apiVersion": "apps/v1",
      "metadata": {
        "name": "heketi",
        "labels": {
          "glusterfs": "heketi-deployment"
        },
        "annotations": {
          "description": "Defines how to deploy Heketi"
        }
      },
      "spec": {
        "selector": {
          "matchLabels": {
            "name": "heketi"
          }
        },
        "replicas": 1,
        "template": {
          "metadata": {
            "name": "heketi",
            "labels": {
              "name": "heketi",
              "glusterfs": "heketi-pod"
            }
          },
          "spec": {
            "serviceAccountName": "heketi-service-account",
            "containers": [
              {
                "image": "heketi/heketi:9",
                "imagePullPolicy": "Always",
                "name": "heketi",
                "env": [
                  {
                    "name": "HEKETI_EXECUTOR",
                    "value": "kubernetes"
                  },
                  {
                    "name": "HEKETI_DB_PATH",
                    "value": "/var/lib/heketi/heketi.db"
                  },
                  {
                    "name": "HEKETI_FSTAB",
                    "value": "/var/lib/heketi/fstab"
                  },
                  {
                    "name": "HEKETI_SNAPSHOT_LIMIT",
                    "value": "14"
                  },
                  {
                    "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
                    "value": "y"
                  }
                ],
                "ports": [
                  {
                    "containerPort": 8080
                  }
                ],
                "volumeMounts": [
                  {
                    "mountPath": "/backupdb",
                    "name": "heketi-db-secret"
                  },
                  {
                    "name": "db",
                    "mountPath": "/var/lib/heketi"
                  },
                  {
                    "name": "config",
                    "mountPath": "/etc/heketi"
                  }
                ],
                "readinessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 3,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                },
                "livenessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 10,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                }
              }
            ],
            "volumes": [
              {
                "name": "db",
                "glusterfs": {
                  "endpoints": "heketi-storage-endpoints",
                  "path": "heketidbstorage"
                }
              },
              {
                "name": "heketi-db-secret",
                "secret": {
                  "secretName": "heketi-db-backup"
                }
              },
              {
                "name": "config",
                "secret": {
                  "secretName": "heketi-config-secret"
                }
              }
            ]
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,7 @@
{
  "apiVersion": "v1",
  "kind": "ServiceAccount",
  "metadata": {
    "name": "heketi-service-account"
  }
}
@@ -0,0 +1,54 @@
{
  "apiVersion": "v1",
  "kind": "List",
  "items": [
    {
      "kind": "Endpoints",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-storage-endpoints",
        "creationTimestamp": null
      },
      "subsets": [
{% set nodeblocks = [] %}
{% for node in nodes %}
{% set nodeblock %}
        {
          "addresses": [
            {
              "ip": "{{ hostvars[node].ip }}"
            }
          ],
          "ports": [
            {
              "port": 1
            }
          ]
        }
{% endset %}
{% if nodeblocks.append(nodeblock) %}{% endif %}
{% endfor %}
{{ nodeblocks|join(',') }}
      ]
    },
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-storage-endpoints",
        "creationTimestamp": null
      },
      "spec": {
        "ports": [
          {
            "port": 1,
            "targetPort": 0
          }
        ]
      },
      "status": {
        "loadBalancer": {}
      }
    }
  ]
}
@@ -0,0 +1,44 @@
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "{{ heketi_admin_key }}"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "{{ heketi_user_key }}"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
    "executor": "kubernetes",

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "kubeexec": {
      "rebalance_on_expansion": true
    },

    "sshexec": {
      "rebalance_on_expansion": true,
      "keyfile": "/etc/heketi/private_key",
      "fstab": "/etc/fstab",
      "port": "22",
      "user": "root",
      "sudo": false
    }
  },

  "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
  "backup_db_to_kube_secret": false
}
@@ -0,0 +1,12 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ endpoint_address }}:8080"
  restuser: "admin"
  restuserkey: "{{ heketi_admin_key }}"
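For reference, a workload would consume the class rendered above by naming it in a PersistentVolumeClaim (and since the class is annotated as the default, `storageClassName` could even be omitted). A minimal sketch; the claim name and size are illustrative, not part of the role:

```yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-claim  # illustrative name
spec:
  storageClassName: gluster
  accessModes:
    - ReadWriteMany    # GlusterFS volumes support shared read-write access
  resources:
    requests:
      storage: 1Gi     # illustrative size
```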
@@ -0,0 +1,34 @@
{
  "clusters": [
    {
      "nodes": [
{% set nodeblocks = [] %}
{% for node in nodes %}
{% set nodeblock %}
        {
          "node": {
            "hostnames": {
              "manage": [
                "{{ node }}"
              ],
              "storage": [
                "{{ hostvars[node].ip }}"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "{{ hostvars[node]['disk_volume_device_1'] }}",
              "destroydata": false
            }
          ]
        }
{% endset %}
{% if nodeblocks.append(nodeblock) %}{% endif %}
{% endfor %}
{{ nodeblocks|join(',') }}
      ]
    }
  ]
}
@@ -0,0 +1,2 @@
---
heketi_remove_lvm: false
@@ -0,0 +1,52 @@
---
- name: "Install lvm utils (RedHat)"
  become: true
  package:
    name: "lvm2"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

- name: "Install lvm utils (Debian)"
  become: true
  apt:
    name: "lvm2"
    state: "present"
  when: "ansible_os_family == 'Debian'"

- name: "Get volume group information."
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
  become: true
  shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
  register: "volume_groups"
  ignore_errors: true  # noqa ignore-errors
  changed_when: false

- name: "Remove volume groups."
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
  become: true
  command: "vgremove {{ volume_group }} --yes"
  with_items: "{{ volume_groups.stdout_lines }}"
  loop_control: { loop_var: "volume_group" }

- name: "Remove physical volume from cluster disks."
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH / CentOS conservative path management
  become: true
  command: "pvremove {{ disk_volume_device_1 }} --yes"
  ignore_errors: true  # noqa ignore-errors

- name: "Remove lvm utils (RedHat)"
  become: true
  package:
    name: "lvm2"
    state: "absent"
  when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"

- name: "Remove lvm utils (Debian)"
  become: true
  apt:
    name: "lvm2"
    state: "absent"
  when: "ansible_os_family == 'Debian' and heketi_remove_lvm"
@@ -0,0 +1,51 @@
---
- name: Remove storage class.
  command: "{{ bin_dir }}/kubectl delete storageclass gluster"
  ignore_errors: true  # noqa ignore-errors
- name: Tear down heketi.
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
  ignore_errors: true  # noqa ignore-errors
- name: Tear down heketi.
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
  ignore_errors: true  # noqa ignore-errors
- name: Tear down bootstrap.
  include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: Ensure there is nothing left over.
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
  retries: 60
  delay: 5
- name: Ensure there is nothing left over.
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
  retries: 60
  delay: 5
- name: Tear down glusterfs.
  command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
  ignore_errors: true  # noqa ignore-errors
- name: Remove heketi storage service.
  command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
  ignore_errors: true  # noqa ignore-errors
- name: Remove heketi gluster role binding
  command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
  ignore_errors: true  # noqa ignore-errors
- name: Remove heketi config secret
  command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
  ignore_errors: true  # noqa ignore-errors
- name: Remove heketi db backup
  command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
  ignore_errors: true  # noqa ignore-errors
- name: Remove heketi service account
  command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
  ignore_errors: true  # noqa ignore-errors
- name: Get secrets
  command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
  register: "secrets"
  changed_when: false
- name: Remove heketi storage secret
  vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
  command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
  when: "storage_query is defined"
  ignore_errors: true  # noqa ignore-errors
@@ -24,7 +24,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
for i in $KUBE_IMAGES; do
  echo "{{ kube_image_repo }}/$i:v{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
  echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
done

# run ansible to expand templates
@@ -233,7 +233,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version

```yml
cilium_version: "1.15.9"
cilium_version: v1.12.1
```

## Add variable to config

@@ -25,7 +25,7 @@ Some variables of note include:
* *calico_vxlan_mode* - Configures Calico vxlan encapsulation - valid values are 'Never', 'Always' and 'CrossSubnet' (default 'Always')
* *calico_network_backend* - Configures Calico network backend - valid values are 'none', 'bird' and 'vxlan' (default 'vxlan')
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
* *kube_proxy_mode* - Changes k8s proxy mode to iptables, ipvs, nftables mode
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *remove_default_searchdomains* - Boolean that removes the default searchdomain

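For orientation, a sketch of how a few of these variables might be set together in inventory group vars (the file path and all values are illustrative examples, not recommended defaults):

```yml
# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical)
kube_network_plugin: calico
calico_vxlan_mode: CrossSubnet
calico_network_backend: vxlan
kube_proxy_mode: ipvs
searchdomains:
  - cluster.example.internal  # illustrative search domain
remove_default_searchdomains: false
```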
@@ -14,7 +14,7 @@ debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse15 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
@@ -33,7 +33,7 @@ debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse15 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -52,7 +52,7 @@ debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse15 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -59,8 +59,6 @@ ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
  --extra-vars "node=nodename,nodename2"
```

> Note: The playbook does not currently support the removal of the first control plane or etcd node. These nodes are essential for maintaining cluster operations and must remain intact.

If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=false`
to skip the node reset step. If one node is unavailable, but others you wish
to remove are able to connect via SSH, you could set `reset_nodes=false` as a host

@@ -1,4 +1,4 @@
# openSUSE Leap 15.6 and Tumbleweed
# openSUSE Leap 15.3 and Tumbleweed

openSUSE Leap installation Notes:

@@ -28,13 +28,13 @@ If you wanted to upgrade just kube_version from v1.18.10 to v1.19.7, you could
deploy the following way:

```ShellSession
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=1.18.10 -e upgrade_cluster_setup=true
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.18.10 -e upgrade_cluster_setup=true
```

And then repeat with 1.19.7 as kube_version:
And then repeat with v1.19.7 as kube_version:

```ShellSession
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=1.19.7 -e upgrade_cluster_setup=true
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.19.7 -e upgrade_cluster_setup=true
```

The var ```-e upgrade_cluster_setup=true``` must be set in order to migrate the deploys of e.g. kube-apiserver inside the cluster immediately, which is usually only done in the graceful upgrade. (Refer to [#4139](https://github.com/kubernetes-sigs/kubespray/issues/4139) and [#4736](https://github.com/kubernetes-sigs/kubespray/issues/4736))
@@ -48,7 +48,7 @@ existing cluster. That means there must be at least 1 kube_control_plane already
deployed.

```ShellSession
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=1.19.7
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.19.7
```

After a successful upgrade, the Server Version should be updated:
@@ -62,7 +62,7 @@ Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7", GitCom
You can control how many nodes are upgraded at the same time by modifying the ansible variable named `serial`, as explained [here](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_strategies.html#setting-the-batch-size-with-serial). If you don't set this variable, it will upgrade the cluster nodes in batches of 20% of the available nodes. Setting `serial=1` would mean upgrading one node at a time.

```ShellSession
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=1.20.7 -e "serial=1"
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 -e "serial=1"
```

### Pausing the upgrade
@@ -90,14 +90,14 @@ ansible-playbook facts.yml -b -i inventory/sample/hosts.ini
After this, upgrade the control plane and etcd groups [#5147](https://github.com/kubernetes-sigs/kubespray/issues/5147):

```ShellSession
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=1.20.7 --limit "kube_control_plane:etcd"
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "kube_control_plane:etcd"
```

Now you can upgrade other nodes in any order and quantity:

```ShellSession
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=1.20.7 --limit "node4:node6:node7:node12"
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=1.20.7 --limit "node5*"
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "node4:node6:node7:node12"
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.20.7 --limit "node5*"
```

## Multiple upgrades
@@ -126,7 +126,7 @@ v2.22.0 -> v2.24.0 : ✕

Assuming you don't explicitly define a kubernetes version in your k8s_cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook.

* If you do define a kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=1.11.3`
* If you do define a kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`

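As an illustration of the tag-checkout flow described above, the sequence might look like the following sketch (the tag and version number are placeholders, not a recommendation):

```ShellSession
git checkout v2.24.0
ansible-playbook upgrade-cluster.yml -b -i inventory/mycluster/hosts.ini -e kube_version=v1.29.5
```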
Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.

@@ -180,7 +180,7 @@ cert_manager_enabled: false
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_namespace: "metallb-system"
# metallb_version: 0.13.9
# metallb_version: v0.13.9
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
@@ -242,7 +242,7 @@ metallb_namespace: "metallb-system"
# - pool2

argocd_enabled: false
# argocd_version: 2.14.5
# argocd_version: v2.14.5
# argocd_namespace: argocd
# Default password:
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli

@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: 1.32.2
kube_version: v1.32.2

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
@@ -118,8 +118,7 @@ kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.i
kube_apiserver_port: 6443  # (https)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables, nftables
# TODO: it needs to be changed to nftables when upstream uses nftables as default
# Can be ipvs, iptables
kube_proxy_mode: ipvs

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface

@@ -1,5 +1,5 @@
---
# cilium_version: "1.15.9"
# cilium_version: "v1.15.9"

# Log-level
# cilium_debug: false

@@ -2,9 +2,9 @@

# Kube router version
# Default to v2
# kube_router_version: "2.0.0"
# kube_router_version: "v2.0.0"
# Uncomment to use v1 (Deprecated)
# kube_router_version: "1.6.0"
# kube_router_version: "v1.6.0"

# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true

@@ -1,19 +1,9 @@
---
- name: Validate nodes for removal
  hosts: localhost
  tasks:
    - name: Assert that nodes are specified for removal
      assert:
        that:
          - node is defined
          - node | length > 0
        msg: "No nodes specified for removal. The `node` variable must be set explicitly."

- name: Common tasks for every playbook
  import_playbook: boilerplate.yml

- name: Confirm node removal
  hosts: "{{ node | default('this_is_unreachable') }}"
  hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
  gather_facts: false
  tasks:
    - name: Confirm Execution
@@ -34,7 +24,7 @@
      when: reset_nodes | default(True) | bool

- name: Reset node
  hosts: "{{ node | default('this_is_unreachable') }}"
  hosts: "{{ node | default('kube_node') }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  pre_tasks:
@@ -50,7 +40,7 @@

# Currently cannot remove first control plane node or first etcd node
- name: Post node removal
  hosts: "{{ node | default('this_is_unreachable') }}"
  hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:

@@ -1,5 +1,5 @@
---
docker_version: '28.0'
docker_version: '26.1'
docker_cli_version: "{{ docker_version }}"

docker_package_info:
@@ -53,8 +53,8 @@ docker_fedora_repo_base_url: 'https://download.docker.com/linux/fedora/{{ ansibl
docker_fedora_repo_gpgkey: 'https://download.docker.com/linux/fedora/gpg'

# CentOS/RedHat docker-ce repo
docker_rh_repo_base_url: 'https://download.docker.com/linux/rhel/{{ ansible_distribution_major_version }}/$basearch/stable'
docker_rh_repo_gpgkey: 'https://download.docker.com/linux/rhel/gpg'
docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/{{ ansible_distribution_major_version }}/$basearch/stable'
docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'

# Ubuntu docker-ce repo
docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"

@@ -25,17 +25,8 @@ containerd_versioned_pkg:
  '1.6.28': "{{ containerd_package }}=1.6.28-2"
  '1.6.31': "{{ containerd_package }}=1.6.31-1"
  '1.6.32': "{{ containerd_package }}=1.6.32-1"
  '1.6.33': "{{ containerd_package }}=1.6.33-1"
  '1.7.18': "{{ containerd_package }}=1.7.18-1"
  '1.7.19': "{{ containerd_package }}=1.7.19-1"
  '1.7.20': "{{ containerd_package }}=1.7.20-1"
  '1.7.21': "{{ containerd_package }}=1.7.21-1"
  '1.7.22': "{{ containerd_package }}=1.7.22-1"
  '1.7.23': "{{ containerd_package }}=1.7.23-1"
  '1.7.24': "{{ containerd_package }}=1.7.24-1"
  '1.7.25': "{{ containerd_package }}=1.7.25-1"
  'stable': "{{ containerd_package }}=1.7.25-1"
  'edge': "{{ containerd_package }}=1.7.25-1"
  'stable': "{{ containerd_package }}=1.6.32-1"
  'edge': "{{ containerd_package }}=1.6.32-1"

# https://download.docker.com/linux/debian/
docker_versioned_pkg:
@@ -47,16 +38,9 @@ docker_versioned_pkg:
  '24.0': docker-ce=5:24.0.9-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '25.0': docker-ce=5:25.0.5-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.0': docker-ce=5:26.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce=5:26.1.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.0': docker-ce=5:27.0.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.1': docker-ce=5:27.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.2': docker-ce=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.3': docker-ce=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.4': docker-ce=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.5': docker-ce=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '28.0': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce=5:26.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce=5:24.0.9-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce=5:24.0.9-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}

docker_cli_versioned_pkg:
  'latest': docker-ce-cli
@@ -67,16 +51,9 @@ docker_cli_versioned_pkg:
  '24.0': docker-ce-cli=5:24.0.9-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '25.0': docker-ce-cli=5:25.0.5-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.0': docker-ce-cli=5:26.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce-cli=5:26.1.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.0': docker-ce-cli=5:27.0.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.1': docker-ce-cli=5:27.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.2': docker-ce-cli=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.3': docker-ce-cli=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.4': docker-ce-cli=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '27.5': docker-ce-cli=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '28.0': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce-cli=5:26.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce-cli=5:26.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce-cli=5:26.1.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}

docker_package_info:
  pkgs:

@@ -25,17 +25,8 @@ containerd_versioned_pkg:
  '1.6.28': "{{ containerd_package }}-1.6.28-3.2.fc{{ ansible_distribution_major_version }}"
  '1.6.31': "{{ containerd_package }}-1.6.31-3.1.fc{{ ansible_distribution_major_version }}"
  '1.6.32': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}"
  '1.6.33': "{{ containerd_package }}-1.6.33-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.18': "{{ containerd_package }}-1.7.18-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.19': "{{ containerd_package }}-1.7.19-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.20': "{{ containerd_package }}-1.7.20-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.21': "{{ containerd_package }}-1.7.21-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.22': "{{ containerd_package }}-1.7.22-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.23': "{{ containerd_package }}-1.7.23-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.24': "{{ containerd_package }}-1.7.24-3.1.fc{{ ansible_distribution_major_version }}"
  '1.7.25': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
  'stable': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
  'edge': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
  'stable': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}"
  'edge': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}"

# https://docs.docker.com/install/linux/docker-ce/fedora/
# https://download.docker.com/linux/fedora/<fedora-version>/x86_64/stable/Packages/
@@ -46,16 +37,9 @@ docker_versioned_pkg:
  '23.0': docker-ce-3:23.0.6-1.fc{{ ansible_distribution_major_version }}
  '24.0': docker-ce-3:24.0.9-1.fc{{ ansible_distribution_major_version }}
  '26.0': docker-ce-3:26.0.2-1.fc{{ ansible_distribution_major_version }}
  '26.1': docker-ce-3:26.1.4-1.fc{{ ansible_distribution_major_version }}
  '27.0': docker-ce-3:27.0.3-1.fc{{ ansible_distribution_major_version }}
  '27.1': docker-ce-3:27.1.2-1.fc{{ ansible_distribution_major_version }}
  '27.2': docker-ce-3:27.2.1-1.fc{{ ansible_distribution_major_version }}
  '27.3': docker-ce-3:27.3.1-1.fc{{ ansible_distribution_major_version }}
  '27.4': docker-ce-3:27.4.1-1.fc{{ ansible_distribution_major_version }}
  '27.5': docker-ce-3:27.5.1-1.fc{{ ansible_distribution_major_version }}
  '28.0': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
  'stable': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
  'edge': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
  '26.1': docker-ce-3:26.1.2-1.fc{{ ansible_distribution_major_version }}
  'stable': docker-ce-3:26.1.2-1.fc{{ ansible_distribution_major_version }}
  'edge': docker-ce-3:26.1.2-1.fc{{ ansible_distribution_major_version }}

docker_cli_versioned_pkg:
  'latest': docker-ce-cli
@@ -64,16 +48,9 @@ docker_cli_versioned_pkg:
  '23.0': docker-ce-cli-1:23.0.6-1.fc{{ ansible_distribution_major_version }}
  '24.0': docker-ce-cli-1:24.0.9-1.fc{{ ansible_distribution_major_version }}
  '26.0': docker-ce-cli-1:26.0.2-1.fc{{ ansible_distribution_major_version }}
  '26.1': docker-ce-cli-1:26.1.4-1.fc{{ ansible_distribution_major_version }}
  '27.0': docker-ce-cli-1:27.0.3-1.fc{{ ansible_distribution_major_version }}
  '27.1': docker-ce-cli-1:27.1.2-1.fc{{ ansible_distribution_major_version }}
  '27.2': docker-ce-cli-1:27.2.1-1.fc{{ ansible_distribution_major_version }}
  '27.3': docker-ce-cli-1:27.3.1-1.fc{{ ansible_distribution_major_version }}
  '27.4': docker-ce-cli-1:27.4.1-1.fc{{ ansible_distribution_major_version }}
  '27.5': docker-ce-cli-1:27.5.1-1.fc{{ ansible_distribution_major_version }}
  '28.0': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
  'stable': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
  'edge': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
  '26.1': docker-ce-cli-1:26.0.2-1.fc{{ ansible_distribution_major_version }}
  'stable': docker-ce-cli-1:26.0.2-1.fc{{ ansible_distribution_major_version }}
  'edge': docker-ce-cli-1:26.0.2-1.fc{{ ansible_distribution_major_version }}

docker_package_info:
  enablerepo: "docker-ce"

63 roles/container-engine/docker/vars/redhat-7.yml Normal file
@@ -0,0 +1,63 @@
---
# containerd versions are only relevant for docker
containerd_versioned_pkg:
  'latest': "{{ containerd_package }}"
  '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el7"
  '1.3.9': "{{ containerd_package }}-1.3.9-3.1.el7"
  '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el7"
  '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el7"
  '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el7"
  '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el7"
  '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el7"
  '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el7"
  '1.6.6': "{{ containerd_package }}-1.6.6-3.1.el7"
  '1.6.7': "{{ containerd_package }}-1.6.7-3.1.el7"
  '1.6.8': "{{ containerd_package }}-1.6.8-3.1.el7"
  '1.6.9': "{{ containerd_package }}-1.6.9-3.1.el7"
  '1.6.10': "{{ containerd_package }}-1.6.10-3.1.el7"
  '1.6.11': "{{ containerd_package }}-1.6.11-3.1.el7"
  '1.6.12': "{{ containerd_package }}-1.6.12-3.1.el7"
  '1.6.13': "{{ containerd_package }}-1.6.13-3.1.el7"
  '1.6.14': "{{ containerd_package }}-1.6.14-3.1.el7"
  '1.6.15': "{{ containerd_package }}-1.6.15-3.1.el7"
  '1.6.16': "{{ containerd_package }}-1.6.16-3.1.el7"
  '1.6.18': "{{ containerd_package }}-1.6.18-3.1.el7"
  '1.6.28': "{{ containerd_package }}-1.6.28-3.1.el7"
  '1.6.31': "{{ containerd_package }}-1.6.31-3.1.el7"
  '1.6.32': "{{ containerd_package }}-1.6.32-3.1.el7"
  'stable': "{{ containerd_package }}-1.6.32-3.1.el7"
  'edge': "{{ containerd_package }}-1.6.32-3.1.el7"

# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/
# or do 'yum --showduplicates list docker-engine'
docker_versioned_pkg:
  'latest': docker-ce
  '18.09': docker-ce-18.09.9-3.el7
  '19.03': docker-ce-19.03.15-3.el7
  '20.10': docker-ce-20.10.20-3.el7
  '23.0': docker-ce-23.0.6-1.el7
  '24.0': docker-ce-24.0.9-1.el7
  '26.0': docker-ce-26.0.2-1.el7
  '26.1': docker-ce-26.1.2-1.el7
  'stable': docker-ce-26.1.2-1.el7
  'edge': docker-ce-26.1.2-1.el7

docker_cli_versioned_pkg:
  'latest': docker-ce-cli
  '18.09': docker-ce-cli-18.09.9-3.el7
  '19.03': docker-ce-cli-19.03.15-3.el7
  '20.10': docker-ce-cli-20.10.20-3.el7
  '23.0': docker-ce-cli-23.0.6-1.el7
  '24.0': docker-ce-cli-24.0.9-1.el7
  '26.0': docker-ce-cli-26.0.2-1.el7
  '26.1': docker-ce-cli-26.1.2-1.el7
  'stable': docker-ce-cli-26.1.2-1.el7
  'edge': docker-ce-cli-26.1.2-1.el7

docker_package_info:
  enablerepo: "docker-ce"
  pkgs:
    - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}"
    - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
    - "{{ docker_versioned_pkg[docker_version | string] }}"
@@ -25,20 +25,11 @@ containerd_versioned_pkg:
  '1.6.28': "{{ containerd_package }}-1.6.28-3.1.el{{ ansible_distribution_major_version }}"
  '1.6.31': "{{ containerd_package }}-1.6.31-3.1.el{{ ansible_distribution_major_version }}"
  '1.6.32': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}"
  '1.6.33': "{{ containerd_package }}-1.6.33-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.18': "{{ containerd_package }}-1.7.18-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.19': "{{ containerd_package }}-1.7.19-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.20': "{{ containerd_package }}-1.7.20-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.21': "{{ containerd_package }}-1.7.21-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.22': "{{ containerd_package }}-1.7.22-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.23': "{{ containerd_package }}-1.7.23-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.24': "{{ containerd_package }}-1.7.24-3.1.el{{ ansible_distribution_major_version }}"
  '1.7.25': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
  'stable': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
  'edge': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
  'stable': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}"
  'edge': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}"

# https://docs.docker.com/engine/installation/linux/rhel/#install-from-a-package
# https://download.docker.com/linux/rhel/<rhel_version>>/x86_64/stable/Packages/
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/
# or do 'yum --showduplicates list docker-engine'
docker_versioned_pkg:
  'latest': docker-ce
@@ -48,16 +39,9 @@ docker_versioned_pkg:
  '23.0': docker-ce-3:23.0.6-1.el{{ ansible_distribution_major_version }}
  '24.0': docker-ce-3:24.0.9-1.el{{ ansible_distribution_major_version }}
  '26.0': docker-ce-3:26.0.2-1.el{{ ansible_distribution_major_version }}
  '26.1': docker-ce-3:26.1.4-1.el{{ ansible_distribution_major_version }}
  '27.0': docker-ce-3:27.0.3-1.el{{ ansible_distribution_major_version }}
  '27.1': docker-ce-3:27.1.3-1.el{{ ansible_distribution_major_version }}
  '27.2': docker-ce-3:27.2.3-1.el{{ ansible_distribution_major_version }}
  '27.3': docker-ce-3:27.3.3-1.el{{ ansible_distribution_major_version }}
  '27.4': docker-ce-3:27.4.3-1.el{{ ansible_distribution_major_version }}
  '27.5': docker-ce-3:27.5.3-1.el{{ ansible_distribution_major_version }}
  '28.0': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
  'stable': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
  'edge': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
  '26.1': docker-ce-3:26.1.2-1.el{{ ansible_distribution_major_version }}
  'stable': docker-ce-3:26.1.2-1.el{{ ansible_distribution_major_version }}
  'edge': docker-ce-3:26.1.2-1.el{{ ansible_distribution_major_version }}

docker_cli_versioned_pkg:
  'latest': docker-ce-cli
@@ -67,16 +51,9 @@ docker_cli_versioned_pkg:
  '23.0': docker-ce-cli-1:23.0.6-1.el{{ ansible_distribution_major_version }}
  '24.0': docker-ce-cli-1:24.0.9-1.el{{ ansible_distribution_major_version }}
  '26.0': docker-ce-cli-1:26.0.2-1.el{{ ansible_distribution_major_version }}
  '26.1': docker-ce-cli-1:26.1.4-1.el{{ ansible_distribution_major_version }}
  '27.0': docker-ce-cli-1:27.0.3-1.el{{ ansible_distribution_major_version }}
  '27.1': docker-ce-cli-1:27.1.3-1.el{{ ansible_distribution_major_version }}
  '27.2': docker-ce-cli-1:27.2.3-1.el{{ ansible_distribution_major_version }}
  '27.3': docker-ce-cli-1:27.3.3-1.el{{ ansible_distribution_major_version }}
  '27.4': docker-ce-cli-1:27.4.3-1.el{{ ansible_distribution_major_version }}
  '27.5': docker-ce-cli-1:27.5.3-1.el{{ ansible_distribution_major_version }}
  '28.0': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
  'stable': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
  'edge': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
  '26.1': docker-ce-cli-1:26.1.2-1.el{{ ansible_distribution_major_version }}
  'stable': docker-ce-cli-1:26.1.2-1.el{{ ansible_distribution_major_version }}
  'edge': docker-ce-cli-1:26.1.2-1.el{{ ansible_distribution_major_version }}

docker_package_info:
  enablerepo: "docker-ce"

@@ -2,6 +2,13 @@
# containerd versions are only relevant for docker
containerd_versioned_pkg:
  'latest': "{{ containerd_package }}"
  '1.3.7': "{{ containerd_package }}=1.3.7-1"
  '1.3.9': "{{ containerd_package }}=1.3.9-1"
  '1.4.3': "{{ containerd_package }}=1.4.3-2"
  '1.4.4': "{{ containerd_package }}=1.4.4-1"
  '1.4.6': "{{ containerd_package }}=1.4.6-1"
  '1.4.9': "{{ containerd_package }}=1.4.9-1"
  '1.4.12': "{{ containerd_package }}=1.4.12-1"
  '1.6.4': "{{ containerd_package }}=1.6.4-1"
  '1.6.6': "{{ containerd_package }}=1.6.6-1"
  '1.6.7': "{{ containerd_package }}=1.6.7-1"
@@ -18,17 +25,8 @@ containerd_versioned_pkg:
  '1.6.28': "{{ containerd_package }}=1.6.28-2"
  '1.6.31': "{{ containerd_package }}=1.6.31-1"
  '1.6.32': "{{ containerd_package }}=1.6.32-1"
  '1.6.33': "{{ containerd_package }}=1.6.33-1"
  '1.7.18': "{{ containerd_package }}=1.7.18-1"
  '1.7.19': "{{ containerd_package }}=1.7.19-1"
  '1.7.20': "{{ containerd_package }}=1.7.20-1"
  '1.7.21': "{{ containerd_package }}=1.7.21-1"
  '1.7.22': "{{ containerd_package }}=1.7.22-1"
  '1.7.23': "{{ containerd_package }}=1.7.23-1"
  '1.7.24': "{{ containerd_package }}=1.7.24-1"
  '1.7.25': "{{ containerd_package }}=1.7.25-1"
  'stable': "{{ containerd_package }}=1.7.25-1"
  'edge': "{{ containerd_package }}=1.7.25-1"
  'stable': "{{ containerd_package }}=1.6.32-1"
  'edge': "{{ containerd_package }}=1.6.32-1"

# https://download.docker.com/linux/ubuntu/
docker_versioned_pkg:
@@ -39,16 +37,9 @@ docker_versioned_pkg:
  '23.0': docker-ce=5:23.0.6-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '24.0': docker-ce=5:24.0.9-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.0': docker-ce=5:26.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce=5:26.1.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.0': docker-ce=5:27.0.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.1': docker-ce=5:27.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.2': docker-ce=5:27.2.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.3': docker-ce=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.4': docker-ce=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.5': docker-ce=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '28.0': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}

docker_cli_versioned_pkg:
  'latest': docker-ce-cli
@@ -58,16 +49,9 @@ docker_cli_versioned_pkg:
  '23.0': docker-ce-cli=5:23.0.6-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '24.0': docker-ce-cli=5:24.0.9-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.0': docker-ce-cli=5:26.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce-cli=5:26.1.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.0': docker-ce-cli=5:27.0.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.1': docker-ce-cli=5:27.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.2': docker-ce-cli=5:27.2.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.3': docker-ce-cli=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.4': docker-ce-cli=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '27.5': docker-ce-cli=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '28.0': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  '26.1': docker-ce-cli=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'stable': docker-ce-cli=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
  'edge': docker-ce-cli=5:26.1.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}

docker_package_info:
  pkgs:

@@ -47,7 +47,7 @@ spec:
        - coredns
{% if enable_nodelocaldns_secondary %}
        - -skipteardown
{% endif %}
{% else %}
        ports:
        - containerPort: 53
          name: dns
@@ -55,9 +55,10 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: {{ nodelocaldns_prometheus_port }}
        - containerPort: 9253
          name: metrics
          protocol: TCP
{% endif %}
        securityContext:
          privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}

@@ -39,10 +39,6 @@ spec:
cpu: {{ nodelocaldns_cpu_requests }}
memory: {{ nodelocaldns_memory_requests }}
args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ]
ports:
- containerPort: {{ nodelocaldns_secondary_prometheus_port }}
name: metrics
protocol: TCP
securityContext:
privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
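Both nodelocaldns templates above read their metrics ports from variables instead of the hardcoded 9253. The matching defaults would look roughly like this (the variable names are taken from the templates; the port values shown are the usual upstream defaults and are an assumption here):

nodelocaldns_prometheus_port: 9253
nodelocaldns_secondary_prometheus_port: 9255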
@@ -1716,7 +1716,7 @@ spec:
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: "{{ metallb_controller_image_repo }}:{{ metallb_image_tag }}"
image: "{{ metallb_controller_image_repo }}:v{{ metallb_version }}"
livenessProbe:
failureThreshold: 3
httpGet:
@@ -1824,7 +1824,7 @@ spec:
secretKeyRef:
key: secretkey
name: memberlist
image: "{{ metallb_speaker_image_repo }}:{{ metallb_image_tag }}"
image: "{{ metallb_speaker_image_repo }}:v{{ metallb_version }}"
livenessProbe:
failureThreshold: 3
httpGet:
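The MetalLB manifest hunks swap a dedicated metallb_image_tag variable for an inline v{{ metallb_version }} tag. Either spelling keeps the tag a pure function of the version; a sketch of the variable-based form, where controller_image is a hypothetical name used only for illustration:

metallb_version: 0.13.9
# the tag is always the version with a "v" prefix, so only the version needs bumping
metallb_image_tag: "v{{ metallb_version }}"
# e.g. quay.io/metallb/controller:v0.13.9
controller_image: "{{ metallb_controller_image_repo }}:{{ metallb_image_tag }}"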
@@ -132,15 +132,6 @@
tags:
- kube-proxy

- name: Modprobe Kernel Module for nftables
community.general.modprobe:
name: "nf_tables"
state: present
persistent: present
when: kube_proxy_mode == 'nftables'
tags:
- kube-proxy

- name: Install kubelet
import_tasks: kubelet.yml
tags:
@@ -202,20 +202,13 @@
- dashboard_enabled
- not ignore_assert_errors

- name: Stop if kernel version is too low for cilium
- name: Stop if kernel version is too low
assert:
that: ansible_kernel.split('-')[0] is version('4.9.17', '>=')
when:
- kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
- not ignore_assert_errors

- name: Stop if kernel version is too low for nftables
assert:
that: ansible_kernel.split('-')[0] is version('5.13', '>=')
when:
- kube_proxy_mode == 'nftables'
- not ignore_assert_errors

- name: Stop if bad hostname
assert:
that: inventory_hostname is match("[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
@@ -321,7 +314,7 @@
that:
- kube_network_plugin in ['calico', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'kube-ovn', 'kube-router', 'macvlan', 'custom_cni', 'none']
- dns_mode in ['coredns', 'coredns_dual', 'manual', 'none']
- kube_proxy_mode in ['iptables', 'ipvs', 'nftables']
- kube_proxy_mode in ['iptables', 'ipvs']
- cert_management in ['script', 'none']
- resolvconf_mode in ['docker_dns', 'host_resolvconf', 'none']
- etcd_deployment_type in ['host', 'docker', 'kubeadm']
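The kernel assertions above compare ansible_kernel.split('-')[0] rather than the raw fact, because ansible_kernel reports strings such as 5.15.0-105-generic and the Jinja version() test needs the bare X.Y.Z part. A small illustrative task showing the normalisation:

- name: Show how the kernel version is normalised before the assert (illustrative)
  ansible.builtin.debug:
    msg: >-
      {{ ansible_kernel }} -> {{ ansible_kernel.split('-')[0] }}
      (nftables-capable: {{ ansible_kernel.split('-')[0] is version('5.13', '>=') }})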
@@ -52,9 +52,6 @@ pkgs:
mergerfs:
- "{{ ansible_distribution == 'Debian' }}"
- "{{ ansible_distribution_major_version == '12' }}"
nftables:
- "{{ kube_proxy_mode == 'nftables' }}"
- "{{ 'k8s_cluster' in group_names }}"
nss:
- "{{ ansible_os_family == 'RedHat' }}"
openssl: []
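In the pkgs map above, a package is installed only when every condition in its list evaluates true, and an empty list (openssl: []) means the package is installed unconditionally. Expanded into a standalone task, the nftables entry is roughly equivalent to the sketch below (the real role resolves the whole map generically rather than emitting one task per package):

- name: Install nftables userspace tooling (equivalent expansion, illustrative)
  ansible.builtin.package:
    name: nftables
    state: present
  when:
    - kube_proxy_mode == 'nftables'
    - "'k8s_cluster' in group_names"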
@@ -343,8 +343,6 @@ kubeadm_checksums:
1.30.0: sha256:a77badcaff292862df8324e17f74ab7ce3c6ea9f390647878f1838a3a832f413
etcd_binary_checksums:
arm64:
3.5.21: sha256:95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f
3.5.20: sha256:f034232e6fb64b0d89c45fd78b8b4c3e9fb8d69605dddddcdebf5d7cd96a1531
3.5.19: sha256:a786fd2c92c3c0404586ffedf1b318e4944a17aefed1fa6566f5712ddb8359ad
3.5.18: sha256:c2bcaf465537d6d47c8bb82a69e31f786f32257050e3bca445bc4e63479ec714
3.5.17: sha256:7d717a62520bf39fa1115dfbb1df79479ff74b5eda0914f4132bfa60a48b9549
@@ -360,8 +358,6 @@ etcd_binary_checksums:
3.5.7: sha256:1a35314900da7db006b198dd917e923459b462128101736c63a3cda57ecdbf51
3.5.6: sha256:888e25c9c94702ac1254c7655709b44bb3711ebaabd3cb05439f3dd1f2b51a87
amd64:
3.5.21: sha256:adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c
3.5.20: sha256:9ac85616fb8c0e45f485074dde0258ca2b7b42f1dd5320821af5a8b66daf7072
3.5.19: sha256:16ae742def5f330800590e8d505d72830a3b0b7012e559e6bd76f0bc9864bf42
3.5.18: sha256:6ddde039a7a506badf34e7edfb38e1ea90e36f05c8cfceba602045df623d86fa
3.5.17: sha256:eff6ac621d41711085d0f38fab17d8fa3705f6326c3ff11301a1f5a71fc94edd
@@ -377,8 +373,6 @@ etcd_binary_checksums:
3.5.7: sha256:a43119af79c592a874e8f59c4f23832297849d0c479338f9df36e196b86bc396
3.5.6: sha256:4db32e3bc06dd0999e2171f76a87c1cffed8369475ec7aa7abee9023635670fb
ppc64le:
3.5.21: sha256:6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c
3.5.20: sha256:563bdac64fc92442cf366c02294dff1cbbd3885a86dbcf7f2e87d9388c3b3223
3.5.19: sha256:cc8651929f4d5794892eeeabf612a243ea6233125bc5f8b0f711118736e2710f
3.5.18: sha256:ad90260978a9a94572c8aedfa3c4ab225a451e84ab01a1df35e4124863672999
3.5.17: sha256:5c737b586a1ebcc12bf0d68a2b56583764f4aba82ab4934629626da93d4a9ecc
@@ -432,7 +426,6 @@ cni_binary_checksums:
1.0.0: sha256:1a055924b1b859c54a97dc14894ecaa9b81d6d949530b9544f0af4173f5a8f2a
calicoctl_binary_checksums:
arm64:
3.29.3: sha256:d6cba570af9162dff56714ac5e22dfdd170742bc58a51211f587875a3de79fc4
3.29.2: sha256:3a9b80335338b7f4af762d4a7cf68e67b40839e50711fbe6e67f9a62b69bafdd
3.29.1: sha256:6f662d316a267854dc5487242ca7ec8ca70c35b52bed258aafb76c2d113643c2
3.29.0: sha256:ab23afb283fcdffcf0e1156cdced68d05b6c2b70fd4ea2cbc3189d0ecd43bdfd
@@ -446,7 +439,6 @@ calicoctl_binary_checksums:
3.27.2: sha256:0fd1f65a511338cf9940835987d420c94ab95b5386288ba9673b736a4d347463
3.27.0: sha256:b4b8c71f9658165e45336b9b5e4fad865529feeffe4294247eb5b4c4310dcaf9
amd64:
3.29.3: sha256:8101eef6d31ca80db0c64c7ab8930f657dafc1f8696f145ef5d5f162026eedda
3.29.2: sha256:6076d6745c4d60c0c4322961cbb256a0ffa8476cf7f8dbe5de4ae82c55bca020
3.29.1: sha256:2ac849181cb1fb40c61c06d075711025cdb909d80562d078cc548d50a0edcd3d
3.29.0: sha256:df5048549d72a1f7ea4f61c655699d3b16d8a45873f28c3855c39597b73e8a3d
@@ -460,7 +452,6 @@ calicoctl_binary_checksums:
3.27.2: sha256:692f69dc656e41cd35e23e24f56c98c4aeeb723fed129985b46f71e6eb5e1594
3.27.0: sha256:46e79ae146b3dd90998f56511cf5d6db64deb97cb784235caf1f99e0672d66e4
ppc64le:
3.29.3: sha256:edb98d2a0d3f8afbf98eb000f0d535d4678af39dd6e10a09ea5615a4824f692f
3.29.2: sha256:6f3fd72be26fcf52605d9ece716363a73bb194ca59ee34a257156d30fa5c1542
3.29.1: sha256:ef6064f2ec1a09b5eb8c43ab0c64bd42785c24f5b22b950583fb5074f472c2b7
3.29.0: sha256:c9c2a29a349c6f681aa79b5f5d6aee738305d95aa7f158b6217f487808758e53
@@ -652,7 +643,6 @@ cri_dockerd_archive_checksums:
0.3.5: sha256:30d47bd89998526d51a8518f9e8ef10baed408ab273879ee0e30350702092938
runc_checksums:
arm64:
1.2.6: sha256:12c612e2ebe6ca198de676ce75ed557e79fe6109032209bb8e25166c967fe170
1.2.5: sha256:bfc6575f4c601740539553b639ad6f635c23f76695ed484171bd864df6a23f76
1.2.4: sha256:285f6c4c3de1d78d9f536a0299ae931219527b2ebd9ad89df5a1072896b7e82a
1.2.3: sha256:4ef19ab21ce1ae5a01e1d3fa5b005e45cdf59f5d3ab32541c9e262cb2b2d3451
@@ -668,7 +658,6 @@ runc_checksums:
1.1.9: sha256:b43e9f561e85906f469eef5a7b7992fc586f750f44a0e011da4467e7008c33a0
1.1.8: sha256:7c22cb618116d1d5216d79e076349f93a672253d564b19928a099c20e4acd658
amd64:
1.2.6: sha256:0774f49d1b1eebb5849e644db5e4dc6f2b06cee05f13b3d17d5d6ba62d6f2ebc
1.2.5: sha256:fbd851fce6a8e0d67a9d184ea544c2abf67c9fd29b80fcc1adf67dfe9eb036a1
1.2.4: sha256:e83565aa78ec8f52a4d2b4eb6c4ca262b74c5f6770c1f43670c3029c20175502
1.2.3: sha256:e6e8c8049b1910fce58fa68c057aaa5f42cee2a73834df5e59e5da7612d2739d
@@ -684,7 +673,6 @@ runc_checksums:
1.1.9: sha256:b9bfdd4cb27cddbb6172a442df165a80bfc0538a676fbca1a6a6c8f4c6933b43
1.1.8: sha256:1d05ed79854efc707841dfc7afbf3b86546fc1d0b3a204435ca921c14af8385b
ppc64le:
1.2.6: sha256:0d7fffba4f89920edd3246afd4f07b18a975d0d97193ffae418e8418c236c168
1.2.5: sha256:3764385971ac719535425629e1ac4d451934392993779ee9e8e8ed7566715f5f
1.2.4: sha256:141fa41c1f382483ccf374827f99c7843414fceb95e8ceb710aba8bac984d016
1.2.3: sha256:6d1b771096000a14faae660465faf9626a76afe994cbe60581ec4eac1718f12d
@@ -757,8 +745,6 @@ kata_containers_binary_checksums:
3.0.1: sha256:e2505482f68cc1b1417b8011f2755bf87171a8dd6daaace28531746118fbddaa
gvisor_runsc_binary_checksums:
arm64:
'20250326.0': sha512:6946a6a82990fdf39312f63fdf421cec048eea55019fe9ff47df9a5988ee1a8340976d187623201a9f6fd44e3b23cadcc413f32364861382c907055d774c46ae
'20250319.0': sha512:8bc6cd22af7e682920d3de134836bd1b787379bf447ccbf554e9f3fc7272aa6650dd9aca52cc36d85910c8bfe40c6d76e11c40518617b91868aeefeb938cb2cd
'20250304.0': sha512:3e069ef4a4879747ca044b23babbc231367f5dcb36f2755e297d9bef8b8a649f6a05495b43f662a636a92d81312d0eb572663e34f164010b23dda7a707bee3d6
'20250224.0': sha512:474ccbb94bdbe5d4215b05c27db922d9609af16dc0375bc38d32b0dd07e4f8d1dc2efacea80349175c24b5ed31a2ce51e6735b167a6c74c0a4ee93143d2bb286
'20250217.0': sha512:3f7e01042a62a800efaff48e9faadfdcfa65e42469795bc0d44e42762b476262e1763fa9aa451865432f077a7d13117c0a520947c0954b489193967e1541441c
@@ -793,8 +779,6 @@ gvisor_runsc_binary_checksums:
'20240109.0': sha256:51a1b299997834b902192806def688b1e23ff6b14f28a9ed3397f3f6572a189a
'20231218.0': sha256:86262a78946deacc309c0f08883659ee3298c288048dc30955945e71993c81a8
amd64:
'20250326.0': sha512:1bc7c309c6400f1a700d7bcc6901db632a3e3252ff1313725b9b538dbba8e974fb6789dfc2427f98f70757a4125a29b87c6bf80dd730fc6d07e330bdd72fda40
'20250319.0': sha512:ffbb13d965f7da7ef76fdee02ae8473c28252f896680f589f8363914ed9fc0ccfb21ec616595354baee60bdb06847c86b5d6df058f498f9c9bc0e0683fdd335d
'20250304.0': sha512:b43e386a95dd91f6c5af72c303be8d37b0e0fef60f199916b21760c4e3582865c483bb86d67d60e68342f7512da73bfaa270ff06c5421912ba8c84f272ade48d
'20250224.0': sha512:9cfff30e4609c0b6fd9719c087827fc9527b2ed64972242937e9c178e85ab7b63ae4a7262e4423a6e7965fbf5c5340a4e536adc35e06493ae9a4aa4d25884f3d
'20250217.0': sha512:8c61ddb4efc93ce8a6e0da0d270940912db3727dda391d1ab4b8bfaa67c42e5b177195233188858a800d0e8fbe01a4c500aefa1382d8200b15e6f7e84f91289e
@@ -830,8 +814,6 @@ gvisor_runsc_binary_checksums:
'20231218.0': sha256:c353d36a134dfc2fab8509f72a34abf6a761603975eb00a39e4077c41aeaf31b
gvisor_containerd_shim_binary_checksums:
arm64:
'20250326.0': sha512:ae89ee8b18f42da15f02c4cb5633679f21bd7e865392b57ab8fda215d3775caf04d447098439826f3257dbde28361febcefe89a43937db0dfaae80b4f914686c
'20250319.0': sha512:a8d6b57941be358156ffe729a92df48be3d4f94d3b2c61e19a1bff2dd69aa1220286599b2b2adf456612ddaddc8f812fca82a0aa1571d7ecc07339b4f106206c
'20250304.0': sha512:3e663aa9fb30fc144ffc02eff1e2104ff9e622dadbba73176933ecacf94372d88dea15e18bbdfb9233224bbb03cdb7fdaaa1ace02c1a7feb9c07b0c0571fab28
'20250224.0': sha512:6dc610af5a9d974ae416bc4b37e066c513d63566c1f2db157acc61d73bab5e3e589671c111fba2943b88a6cef344f930a2dbc64533ba828c3f2c505a02b19f9a
'20250217.0': sha512:6dc610af5a9d974ae416bc4b37e066c513d63566c1f2db157acc61d73bab5e3e589671c111fba2943b88a6cef344f930a2dbc64533ba828c3f2c505a02b19f9a
@@ -866,8 +848,6 @@ gvisor_containerd_shim_binary_checksums:
'20240109.0': sha256:40eb0a4f5f0013afb221e228fd6e71887127c4b09c7f2eb36705a0cd5c746d57
'20231218.0': sha256:5f66938de981221359a64f05a5c770b228090db3a2697d91ad622c18dd19f4b2
amd64:
'20250326.0': sha512:186c188f722ccb249429083bec31b9879b5f52c42b612e0819dfa84d7765bdc79f8305b6e4af9f9344a3cfaacfb1f650fd1be716d93e8b6c596167d6eed1f4df
'20250319.0': sha512:f96512d761656bc3101e46ed5d8f06a5d53ca000c0482af3a0286b6743bac698beb98460b052e393e853c7321562942ff602fa9e7eec5c0979b2621df51a72fc
'20250304.0': sha512:124be185c421f4620417f21e3a9c50569b6310aeb296dc242fa98ceb403f2990aac77d507690fcdd7d200a861f7c95ee44cfbaa6c59155b23ff66a9dbf25994f
'20250224.0': sha512:cd1d0dec2cc8789d62e7fcc3823f2b72907b8c264e785a42e46e0e9cb2b2bf54f558c490a7bdd18d79906c967c4433212bb028d99d4953780a218f6f26f40389
'20250217.0': sha512:cd1d0dec2cc8789d62e7fcc3823f2b72907b8c264e785a42e46e0e9cb2b2bf54f558c490a7bdd18d79906c967c4433212bb028d99d4953780a218f6f26f40389
@@ -903,7 +883,6 @@ gvisor_containerd_shim_binary_checksums:
'20231218.0': sha256:a0578a357feb9320298730bf5ba683880ba35c476dc74dc82c79f0b5acc42656
nerdctl_archive_checksums:
arm:
2.0.4: sha256:89e540cb1ac0ed37ec50afc578970a0c9b6a7f1c1b684368da9a726259a3d359
2.0.3: sha256:d95f238738623ae1f4fb01b6a7f287436ba85493700a9de263b3efbff57424d4
2.0.2: sha256:910619da11b90d71758e6843543ab2106c20b5149f353289bd6d553151b540ca
2.0.1: sha256:8f42611dc1554b29dfe990f058ed12920be9cdd78798dcfc6b3845e613eb1252
@@ -917,7 +896,6 @@ nerdctl_archive_checksums:
1.7.1: sha256:799d35de7a182da35d850308c7f1787cd7321404348ff2d5ba64ad43b06b395a
1.7.0: sha256:8b9e7cccbcc0a472685d1bc285f591f41005f8699e7265ea5438a3e06aefdcfd
arm64:
2.0.4: sha256:1f394e3aabc2b202ad17a5ece0495ca554ae3ca346a5dd0aa8344a891734763d
2.0.3: sha256:f2c3f12c99e112cd82ba19ca9b875045c44b2f5a19cecc295ed8d61d415e8851
2.0.2: sha256:c50ba98be0ef05684948f7873078558504a7cc46ff92cffc764c1625b1cd0d40
2.0.1: sha256:cbca59744f6e9dea962e1d3a754294b5e64b53b82f4f7f7d603a591f38545fd5
@@ -931,7 +909,6 @@ nerdctl_archive_checksums:
1.7.1: sha256:46affa0564bb74f595a817e7d5060140099d9cfd9e00e1272b4dbe8b0b85c655
1.7.0: sha256:1255eea5bc2dbac9339d0a9acfb0651dda117504d52cd52b38cf3c2251db4f39
amd64:
2.0.4: sha256:2f9d22179868db4f0a1daf2fd65f58a24f8a78efd2d9b17659f56bcdce85efd0
2.0.3: sha256:95ff850688a73eace7453f19e74bf4cc8a1f3e458eeb97ef7a6b74de9825df16
2.0.2: sha256:1ba015dba039cf6ec2434e88d97707f0b715790e6b7f2e7b6ff7be9200f47bc1
2.0.1: sha256:96e5e3ed79f189a986cd33a40b0c817d7b6c7d9238f51a0737213f409e5d82af
@@ -945,7 +922,6 @@ nerdctl_archive_checksums:
1.7.1: sha256:5fc0a6e8c3a71cbba95fbdb6833fb8a7cd8e78f53de10988362d4029c14b905a
1.7.0: sha256:844c47b175a3d6bc8eaad0c51f23624a5ef10c09e55607803ec2bc846fb04df9
ppc64le:
2.0.4: sha256:93c5bd5f32a3b821fd462a96c6943b06663ff03e8fb16327957fe99576855116
2.0.3: sha256:8d6283b1fe871e319a2f5cf96fe97aba649eeaac0a2a22c81b9b4d3c613c210a
2.0.2: sha256:1baed7f4312404da966155856aa1e4b4f48bae73d64fd2cf6c41ef9326a07b10
2.0.1: sha256:78a3846cacc570e8ee4a1d60928a55954fb4fd1b3b731c0c975a808134166fab
@@ -960,12 +936,10 @@ nerdctl_archive_checksums:
1.7.0: sha256:e421ae655ff68461bad04b4a1a0ffe40c6f0fcfb0847d5730d66cd95a7fd10cd
containerd_archive_checksums:
arm64:
2.0.4: sha256:0fde98b24bb55363a54150732e0ac99a43bccf2a9711371bd5470f32790316f2
2.0.3: sha256:3701008e72e983259afaa594cca5d8126e78e38cf0a586a1f6971cb3f61c4b6b
2.0.2: sha256:14a2a9f7f75f73e5bcfb8b183d0b84830c54b98ef8c5f6ed70e51f1a230c673e
2.0.1: sha256:b07120ae227b52edfdb54131d44b13b987b39e8c1f740b0c969b7701e0fad4fa
2.0.0: sha256:2a00b1553f38aa9e716d61316b661961c2fbfbb7aad7bd73b377be5725ecc0f1
1.7.27: sha256:3f03ea60c7dacddf890be3ab18f7ef859d9d104b19627f52038d7984361912bc
1.7.26: sha256:adea067914e678ac37d5091ead66f1e36e5cced4d395bbd2be60772495e09eff
1.7.25: sha256:e9201d478e4c931496344b779eb6cb40ce5084ec08c8fff159a02cabb0c6b9bf
1.7.24: sha256:420406d2b34ebb422ab3755fbeede59bf3bfcfccf5cfa584b558c93769d99064
@@ -993,7 +967,6 @@ containerd_archive_checksums:
1.7.2: sha256:d75a4ca53d9addd0b2c50172d168b12957e18b2d8b802db2658f2767f15889a6
1.7.1: sha256:1f828dc063e3c24b0840b284c5635b5a11b1197d564c97f9e873b220bab2b41b
1.7.0: sha256:e7e5be2d9c92e076f1e2e15c9f0a6e0609ddb75f7616999b843cba92d01e4da2
1.6.38: sha256:8a685abe7e5352baab1511933431195964e893e19b4a229af48c7a32ee50ab58
1.6.37: sha256:c3da773c40a2e509c13a55fbd25a165c5dfaed7c9b67a71bb4033c3d8d2d0b6a
1.6.36: sha256:48aaf746ad4adc6e5c3b077875ddbd15a8f5b660a5f7dcb533f0205aeeff3785
1.6.35: sha256:0e0066aeffbd4360bfcf16bd08b6a9e40da7f437aa7b292991ce8d08083bee40
@@ -1018,12 +991,10 @@ containerd_archive_checksums:
1.6.15: sha256:d63e4d27c51e33cd10f8b5621c559f09ece8a65fec66d80551b36cac9e61a07d
1.6.14: sha256:3ccb61218e60cbba0e1bbe1e5e2bf809ac1ead8eafbbff36c3195d3edd0e4809
amd64:
2.0.4: sha256:e1c64c5fd60ecd555e750744eaef150b6f78d7f750da5c08c52825aa6b791737
2.0.3: sha256:ac70856f1d8bd3aa9ca5d62db5516b86dfa0f934c1fd1d1c5fa4422dd12ba45e
2.0.2: sha256:9bd5b6a1bdf505d520d9a329c520258ed0a17faa9fe3db12712ee858ad59aae3
2.0.1: sha256:85061a5ce1b306292d5a64f85d5cd3aff93d0982737a1069d370dd6cb7bbfd09
2.0.0: sha256:6f8da716941f7e89315cefaa6e5a8f1ff10b323ff46611313c455df7ab1ebee1
1.7.27: sha256:5b038fb22ab5dbb1ce57dd3d8f102460cd8619ff2afc78870837b06e8c4e840a
1.7.26: sha256:fdf1fb17086b62fc861103da4e3fda3d79bc543b42d2acef5d07e76b13d35d19
1.7.25: sha256:02990fa281c0a2c4b073c6d2415d264b682bd693aa7d86c5d8eb4b86d684a18c
1.7.24: sha256:1a94f15139f37633f39e24f08a4071f4533b285df3cbee6478972d26147bcaef
@@ -1051,7 +1022,6 @@ containerd_archive_checksums:
1.7.2: sha256:2755c70152ab40856510b4549c2dd530e15f5355eb7bf82868e813c9380e22a7
1.7.1: sha256:9504771bcb816d3b27fab37a6cf76928ee5e95a31eb41510a7d10ae726e01e85
1.7.0: sha256:b068b05d58025dc9f2fc336674cac0e377a478930f29b48e068f97c783a423f0
1.6.38: sha256:84f6098c96ff4afc6add67fe4dffc2bb206c86f4b8ceea2157124e4c328697a0
1.6.37: sha256:fd74db561289cede7efcede1dce7da92a7da025b4ca8b2c36fc5559ab1892089
1.6.36: sha256:e9a53f5f7549afbe9208578609eddecd238b7166663ab273f2954fab77602b3f
1.6.35: sha256:50f05b2986a8635827e3f015f10d8d1c342e9a9d6886c7392160b5c27ac77c83
@@ -1076,12 +1046,10 @@ containerd_archive_checksums:
1.6.15: sha256:191bb4f6e4afc237efc5c85b5866b6fdfed731bde12cceaa6017a9c7f8aeda02
1.6.14: sha256:7da626d46c4edcae1eefe6d48dc6521db3e594a402715afcddc6ac9e67e1bfcd
ppc64le:
2.0.4: sha256:ca970d9a53ae504bc36197d6daa931338c387c83b6948b9f9bfdd1a75e25dcf6
2.0.3: sha256:2f0faa0086ae81d00680367ee9d75aafd3c4ca4535362db83fea62dd19c47079
2.0.2: sha256:1b19d31bb8a7f9d26d9b50675e78f397d0b01fa635c33cca456f91c412fa6df1
2.0.1: sha256:09a25357343c7336fe519e5fd1a9dd0f22da869e9deda50c2bc61b6e8c9384be
2.0.0: sha256:2e7f4b15ac85c22c1ced102bbb424124078248f0af3183425ff335a998079809
1.7.27: sha256:ccdfa16e4bba3a993d74fac794d22ddadc1013d351cd099ea933827050ef05a0
1.7.26: sha256:34a86b1bd598b34e8c05956c5976fb0c0b347937d3cd0837edbcebc7f9e7e53f
1.7.25: sha256:0934176e32eace1c23dcb9edff0e78f872bf8f7152b5e6f622e9ccf1ddce8722
1.7.24: sha256:2ca4d527dac68132a2a6b3971d82ddfd18edc7fa838b7cfcfe6eb11efd017871
@@ -1109,7 +1077,6 @@ containerd_archive_checksums:
1.7.2: sha256:cbe7ec913cb603ca218bd8867efdce4bee3b0e0115e467e51c910467daf8184e
1.7.1: sha256:17d97ef55c6ce7af9778dbafb5e73f577d1b34220043a91cccde49dbcc610342
1.7.0: sha256:051e897d3ee5b8c8097f65be447fea2d29226b583ca5d9ed78e9aebcf4e69889
1.6.38: sha256:8971075ef3ab09a478b57342438f5864984b0d38ecd7f432e295727e2035fe4e
1.6.37: sha256:de4d165271e04b2fabf4c4c6ad5be81dc121c1b79818e43b37d1807e1932b981
1.6.36: sha256:8978cd8bcd4d5a2640bad26d8ea522a46847b6e4a62da1b07bfa482c8906e5ce
1.6.35: sha256:99095ab778f6fd532eb01d11771e7f8de8383ef20a00ec536c0cf9c018895115
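The checksum maps above are keyed by architecture, then by version, and embed the hash algorithm in the value (sha256:/sha512: prefix), which matches the checksum format accepted by Ansible's get_url. A hedged sketch of a download verified against such a map; the URL layout and the image_arch variable are assumptions for illustration:

- name: Download and verify etcd (illustrative)
  ansible.builtin.get_url:
    url: "https://github.com/etcd-io/etcd/releases/download/v{{ etcd_version }}/etcd-v{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
    dest: "/tmp/etcd-v{{ etcd_version }}.tar.gz"
    # e.g. "sha256:adddda4b06..." for amd64 / 3.5.21
    checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"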
@@ -119,7 +119,7 @@ cilium_enable_hubble: false

kube_ovn_version: "1.12.21"
kube_ovn_dpdk_version: "19.11-v{{ kube_ovn_version }}"
kube_router_version: "2.1.1"
kube_router_version: "2.0.0"
multus_version: "4.1.0"
helm_version: "{{ (helm_archive_checksums['amd64'] | dict2items)[0].key }}"
nerdctl_version: "{{ (nerdctl_archive_checksums['amd64'] | dict2items)[0].key }}"
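helm_version and nerdctl_version above default to the first key of the matching checksum map: dict2items preserves the YAML insertion order, and the maps are maintained newest-first, so the most recent pinned version becomes the default unless the variable is overridden. An illustrative check:

- name: Show the derived default versions (illustrative)
  ansible.builtin.debug:
    msg: "helm={{ (helm_archive_checksums['amd64'] | dict2items)[0].key }} nerdctl={{ (nerdctl_archive_checksums['amd64'] | dict2items)[0].key }}"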
@@ -323,13 +323,13 @@ rbd_provisioner_image_tag: "v{{ rbd_provisioner_version }}"
local_path_provisioner_version: "0.0.24"
local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "v{{ local_path_provisioner_version }}"
ingress_nginx_version: "1.12.1"
ingress_nginx_version: "1.12.0"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
ingress_nginx_opentelemetry_image_repo: "{{ kube_image_repo }}/ingress-nginx/opentelemetry"
ingress_nginx_controller_image_tag: "v{{ ingress_nginx_version }}"
ingress_nginx_opentelemetry_image_tag: "v20230721-3e2062ee5"
ingress_nginx_kube_webhook_certgen_image_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen"
ingress_nginx_kube_webhook_certgen_image_tag: "v1.5.2"
ingress_nginx_kube_webhook_certgen_image_tag: "v1.5.0"
alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller"
alb_ingress_image_tag: "v1.1.9"
cert_manager_version: "1.15.3"
@@ -398,7 +398,6 @@ dashboard_metrics_scraper_tag: "v1.0.8"
metallb_speaker_image_repo: "{{ quay_image_repo }}/metallb/speaker"
metallb_controller_image_repo: "{{ quay_image_repo }}/metallb/controller"
metallb_version: 0.13.9
metallb_image_tag: "v{{ metallb_version }}"

node_feature_discovery_version: 0.16.4
node_feature_discovery_image_repo: "{{ kube_image_repo }}/nfd/node-feature-discovery"
@@ -1113,7 +1112,7 @@ downloads:
enabled: "{{ metallb_speaker_enabled }}"
container: true
repo: "{{ metallb_speaker_image_repo }}"
tag: "{{ metallb_image_tag }}"
tag: "{{ metallb_version }}"
checksum: "{{ metallb_speaker_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -1122,7 +1121,7 @@ downloads:
enabled: "{{ metallb_enabled }}"
container: true
repo: "{{ metallb_controller_image_repo }}"
tag: "{{ metallb_image_tag }}"
tag: "{{ metallb_version }}"
checksum: "{{ metallb_controller_digest_checksum | default(None) }}"
groups:
- kube_control_plane
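In the downloads entries above, checksum is wired to an optional *_digest_checksum variable that defaults to None, so images are pulled by tag unless a digest pin is supplied. A sketch of supplying such a pin in group vars; the digest value is a placeholder, not a real digest:

# placeholder digest for illustration only
metallb_controller_digest_checksum: "sha256:<digest-of-the-pinned-controller-image>"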
@@ -132,13 +132,6 @@ images:
converted: true
tag: "latest"

opensuse-leap-15-6:
filename: openSUSE-Leap-15.6.x86_64-1.0.1-NoCloud-Build1.177.qcow2
url: https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.6/images/openSUSE-Leap-15.6.x86_64-1.0.1-NoCloud-Build1.177.qcow2
checksum: sha256:9ecd197b34faf1b43627946d0c26e38b5c3058207d1c86c4784b8f765c3289f3
converted: true
tag: "latest"

openeuler-2203:
filename: openEuler-22.03-LTS-SP4-x86_64.qcow2.xz
url: https://mirrors.ocf.berkeley.edu/openeuler/openEuler-22.03-LTS-SP4/virtual_machine_img/x86_64/openEuler-22.03-LTS-SP4-x86_64.qcow2.xz

@@ -10,8 +10,6 @@ dashboard_enabled: true
loadbalancer_apiserver_type: haproxy
local_path_provisioner_enabled: true

kube_proxy_mode: nftables

# NTP management
ntp_enabled: true
ntp_timezone: Etc/UTC

@@ -1,6 +1,6 @@
---
# Instance settings
cloud_image: opensuse-leap-15-6
cloud_image: opensuse-leap-15

# Kubespray settings
kube_network_plugin: cilium
@@ -1,5 +0,0 @@
---
# Instance settings
cloud_image: opensuse-leap-15-6

kube_proxy_mode: nftables
@@ -8,7 +8,7 @@ vm_memory: 1800
auto_renew_certificates: true

# Currently ipvs is not available on KVM: https://packages.ubuntu.com/search?suite=noble&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
kube_proxy_mode: nftables
kube_proxy_mode: iptables
enable_nodelocaldns: false

containerd_registries: