Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-04 08:48:42 +03:00)

Compare commits: d1bf1a26ac ... master (18 commits)
Commits:

- 422e7366ec
- bf69e67240
- c5c2cf16a0
- 69e042bd9e
- 20da3bb1b0
- 4d4058ee8e
- f071fccc33
- 70daea701a
- 3e42b84e94
- 868ff3cea9
- 0b69a18e35
- e30076016c
- f4ccdb5e72
- fcecaf6943
- 37f7a86014
- fff7f10a85
- dc09298f7e
- 680db0c921
@@ -20,7 +20,7 @@ jobs:
           query get_release_branches($owner:String!, $name:String!) {
             repository(owner:$owner, name:$name) {
               refs(refPrefix: "refs/heads/",
-                   first: 2, # TODO increment once we have release branch with the new checksums format
+                   first: 3,
                    query: "release-",
                    orderBy: {
                      field: ALPHABETICAL,
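For reference, the bumped query can be exercised directly against GitHub's GraphQL API; a minimal sketch (assumes a personal access token in `GITHUB_TOKEN`; the `orderBy` arguments beyond `field` are omitted here because the hunk truncates them):

```ShellSession
curl -s https://api.github.com/graphql \
  -H "Authorization: bearer ${GITHUB_TOKEN}" \
  -d '{"query":"{ repository(owner: \"kubernetes-sigs\", name: \"kubespray\") { refs(refPrefix: \"refs/heads/\", first: 3, query: \"release-\") { nodes { name } } } }"}'
```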
@@ -37,7 +37,6 @@ terraform_validate:
     - hetzner
     - vsphere
     - upcloud
-    - nifcloud

 .terraform_apply:
   extends: .terraform_install
@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1

-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b

 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
@@ -29,7 +29,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

 RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
     --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
     && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
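Context for the `--break-system-packages` additions that recur throughout this compare: on ubuntu:24.04 the system Python is marked externally managed (PEP 668), so a plain `pip install` into it is refused. A minimal illustration (pip output abbreviated):

```ShellSession
$ pip install -r requirements.txt
error: externally-managed-environment
$ pip install --break-system-packages -r requirements.txt   # explicit opt-out, as in the updated images
```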
@@ -22,7 +22,7 @@ Ensure you have installed Docker then
 ```ShellSession
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -118,7 +118,7 @@ Note:
 - [cri-o](http://cri-o.io/) 1.34.4 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
-  - [calico](https://github.com/projectcalico/calico) 3.30.5
+  - [calico](https://github.com/projectcalico/calico) 3.30.6
   - [cilium](https://github.com/cilium/cilium) 1.18.6
   - [flannel](https://github.com/flannel-io/flannel) 0.27.3
   - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
contrib/terraform/nifcloud/.gitignore (vendored, 5 lines removed)
@@ -1,5 +0,0 @@
-*.tfstate*
-.terraform.lock.hcl
-.terraform
-
-sample-inventory/inventory.ini
@@ -1,138 +0,0 @@
# Kubernetes on NIFCLOUD with Terraform

Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
                      Kubernetes cluster
                   +----------------------------+
+---------------+  |  +--------------------+    |
|               |  |  | +--------------------+  |
| API server LB +------->                    |  |
|               |  |  | | Control Plane/etcd |  |
+---------------+  |  | | node(s)            |  |
                   |  +-+                    |  |
                   |    +--------------------+  |
                   |              ^             |
                   |              |             |
                   |              v             |
                   |  +--------------------+    |
                   |  | +--------------------+  |
                   |  | |                    |  |
                   |  | | Worker             |  |
                   |  | | node(s)            |  |
                   |  +-+                    |  |
                   |    +--------------------+  |
                   +----------------------------+
```

## Requirements

* Terraform 1.3.7

## Quickstart

### Export Variables

* Your NIFCLOUD credentials:

```bash
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
```

* The SSH KEY used to connect to the instance:
  * FYI: [Cloud Help (SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)

```bash
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
```

* The IP address used to connect to the bastion server:

```bash
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
```

### Create The Infrastructure

* Run terraform:

```bash
terraform init
terraform apply -var-file ./sample-inventory/cluster.tfvars
```

### Set Up Kubernetes

* Generate the cluster configuration file:

```bash
./generate-inventory.sh > sample-inventory/inventory.ini
```

* Export variables:

```bash
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
```

* Set up ssh-agent:

```bash
eval `ssh-agent`
ssh-add <THE PATH TO YOUR SSH KEY>
```

* Run the cluster.yml playbook:

```bash
cd ./../../../
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
```

### Connecting to Kubernetes

* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
* Fetch the kubeconfig file:

```bash
mkdir -p ~/.kube
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
```

* Add the API server LB to /etc/hosts (note: `sudo` must wrap the write itself, not just `echo`, for the redirection to have root privileges):

```bash
echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts
```

* Run kubectl:

```bash
kubectl get node
```

## Variables

* `region`: Region in which to run the cluster
* `az`: Availability zone in which to run the cluster
* `private_ip_bn`: Private IP address of the bastion server
* `private_network_cidr`: Subnet of the private network
* `instances_cp`: Machines to provision as control plane nodes. The key of each object is used as part of the machine's name
  * `private_ip`: Private IP address of the machine
* `instances_wk`: Machines to provision as worker nodes. The key of each object is used as part of the machine's name
  * `private_ip`: Private IP address of the machine
* `instance_key_name`: The key name of the Key Pair to use for the instance
* `instance_type_bn`: The instance type of the bastion server
* `instance_type_wk`: The instance type of worker nodes
* `instance_type_cp`: The instance type of control plane nodes
* `image_name`: OS image used for the instance
* `working_instance_ip`: The IP address used to connect to the bastion server
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
@@ -1,64 +0,0 @@
#!/bin/bash

#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command from the terraform directory.
# The default state file is terraform.tfstate.
#

set -e

TF_OUT=$(terraform output -json)

CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo "[all]"
# Generate control plane hosts
i=1
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
    echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
    i=$(( i + 1 ))
done

# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
    private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
    echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
done

echo ""
echo "[all:vars]"
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"

echo ""
echo "[kube_control_plane]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[etcd]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"
@@ -1,36 +0,0 @@
provider "nifcloud" {
  region = var.region
}

module "kubernetes_cluster" {
  source = "./modules/kubernetes-cluster"

  availability_zone = var.az
  prefix            = "dev"

  private_network_cidr = var.private_network_cidr

  instance_key_name = var.instance_key_name
  instances_cp      = var.instances_cp
  instances_wk      = var.instances_wk
  image_name        = var.image_name

  instance_type_bn = var.instance_type_bn
  instance_type_cp = var.instance_type_cp
  instance_type_wk = var.instance_type_wk

  private_ip_bn = var.private_ip_bn

  additional_lb_filter = [var.working_instance_ip]
}

resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    module.kubernetes_cluster.security_group_name.bastion
  ]
  type      = "IN"
  from_port = 22
  to_port   = 22
  protocol  = "TCP"
  cidr_ip   = var.working_instance_ip
}
@@ -1,301 +0,0 @@
#################################################
##
## Local variables
##
locals {
  # e.g. east-11 is 11
  az_num = reverse(split("-", var.availability_zone))[0]
  # e.g. east-11 is e11
  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"

  # Port used by each protocol
  port_ssh     = 22
  port_kubectl = 6443
  port_kubelet = 10250

  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
  port_bgp   = 179
  port_vxlan = 4789
  port_etcd  = 2379
}

#################################################
##
## General
##

# data
data "nifcloud_image" "this" {
  image_name = var.image_name
}

# private lan
resource "nifcloud_private_lan" "this" {
  private_lan_name  = "${var.prefix}lan"
  availability_zone = var.availability_zone
  cidr_block        = var.private_network_cidr
  accounting_type   = var.accounting_type
}

#################################################
##
## Bastion
##
resource "nifcloud_security_group" "bn" {
  group_name        = "${var.prefix}bn"
  description       = "${var.prefix} bastion"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "bn" {

  instance_id    = "${local.az_short_name}${var.prefix}bn01"
  security_group = nifcloud_security_group.bn.group_name
  instance_type  = var.instance_type_bn

  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = var.private_ip_bn
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}bn01"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Control Plane
##
resource "nifcloud_security_group" "cp" {
  group_name        = "${var.prefix}cp"
  description       = "${var.prefix} control plane"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "cp" {
  for_each = var.instances_cp

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.cp.group_name
  instance_type  = var.instance_type_cp
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

resource "nifcloud_load_balancer" "this" {
  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
  accounting_type    = var.accounting_type
  balancing_type     = 1 // Round-Robin
  load_balancer_port = local.port_kubectl
  instance_port      = local.port_kubectl
  instances          = [for v in nifcloud_instance.cp : v.instance_id]
  filter = concat(
    [for k, v in nifcloud_instance.cp : v.public_ip],
    [for k, v in nifcloud_instance.wk : v.public_ip],
    var.additional_lb_filter,
  )
  filter_type = 1 // Allow
}

#################################################
##
## Worker
##
resource "nifcloud_security_group" "wk" {
  group_name        = "${var.prefix}wk"
  description       = "${var.prefix} worker"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "wk" {
  for_each = var.instances_wk

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.wk.group_name
  instance_type  = var.instance_type_wk
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Security Group Rule: Kubernetes
##

# ssh
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_ssh
  to_port                    = local.port_ssh
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.bn.group_name
}

# kubectl
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubectl
  to_port                    = local.port_kubectl
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# kubelet
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

#################################################
##
## Security Group Rule: calico
##

# vxlan
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "vxlan_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# bgp
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "bgp_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# etcd
resource "nifcloud_security_group_rule" "etcd_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_etcd
  to_port                    = local.port_etcd
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}
@@ -1,48 +0,0 @@
output "control_plane_lb" {
  description = "The DNS name of the LB for the control plane"
  value       = nifcloud_load_balancer.this.dns_name
}

output "security_group_name" {
  description = "The security groups used in the cluster"
  value = {
    bastion       = nifcloud_security_group.bn.group_name,
    control_plane = nifcloud_security_group.cp.group_name,
    worker        = nifcloud_security_group.wk.group_name,
  }
}

output "private_network_id" {
  description = "The private network used in the cluster"
  value       = nifcloud_private_lan.this.id
}

output "bastion_info" {
  description = "The bastion information in the cluster"
  value = { (nifcloud_instance.bn.instance_id) : {
    instance_id = nifcloud_instance.bn.instance_id,
    unique_id   = nifcloud_instance.bn.unique_id,
    private_ip  = nifcloud_instance.bn.private_ip,
    public_ip   = nifcloud_instance.bn.public_ip,
  } }
}

output "worker_info" {
  description = "The worker information in the cluster"
  value = { for v in nifcloud_instance.wk : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}

output "control_plane_info" {
  description = "The control plane information in the cluster"
  value = { for v in nifcloud_instance.cp : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
@@ -1,45 +0,0 @@
#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
@@ -1,81 +0,0 @@
variable "availability_zone" {
  description = "The availability zone"
  type        = string
}

variable "prefix" {
  description = "The prefix for the entire cluster"
  type        = string
  validation {
    condition     = length(var.prefix) <= 5
    error_message = "Must be at most 5 characters long."
  }
}

variable "private_network_cidr" {
  description = "The subnet of the private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "private_ip_bn" {
  description = "Private IP of the bastion server"
  type        = string
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of the bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of workers"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of the control plane"
  type        = string
}

variable "image_name" {
  description = "The name of the image"
  type        = string
}

variable "additional_lb_filter" {
  description = "Additional LB filter"
  type        = list(string)
}

variable "accounting_type" {
  type    = string
  default = "1"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -1,3 +0,0 @@
output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
@@ -1,22 +0,0 @@
region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
@@ -1 +0,0 @@
-../../../../inventory/sample/group_vars
@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
@@ -1,77 +0,0 @@
variable "region" {
  description = "The region"
  type        = string
}

variable "az" {
  description = "The availability zone"
  type        = string
}

variable "private_ip_bn" {
  description = "Private IP of the bastion server"
  type        = string
}

variable "private_network_cidr" {
  description = "The subnet of the private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of the bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of workers"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of the control plane"
  type        = string
}

variable "image_name" {
  description = "The name of the image"
  type        = string
}

variable "working_instance_ip" {
  description = "The IP address used to connect to the bastion server."
  type        = string
}

variable "accounting_type" {
  type    = string
  default = "2"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -1006,7 +1006,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
availability_zone = element(var.az_list, count.index)
|
||||
image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
|
||||
image_id = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
|
||||
flavor_id = var.flavor_gfs_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
@@ -1078,7 +1078,7 @@ resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
|
||||
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
resource "openstack_blockstorage_volume_v3" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
|
||||
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
@@ -1088,5 +1088,5 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
||||
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
|
||||
volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
|
||||
volume_id = element(openstack_blockstorage_volume_v3.glusterfs_volume.*.id, count.index)
|
||||
}
|
||||
|
||||
@@ -193,11 +193,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
 to access the inventory and SSH key in the container, like this:

 ```ShellSession
-git checkout v2.29.0
-docker pull quay.io/kubespray/kubespray:v2.29.0
+git checkout v2.30.0
+docker pull quay.io/kubespray/kubespray:v2.30.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -2,7 +2,7 @@
 namespace: kubernetes_sigs
 description: Deploy a production ready Kubernetes cluster
 name: kubespray
-version: 2.30.0
+version: 2.31.0
 readme: README.md
 authors:
   - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
@@ -56,8 +56,8 @@ cilium_l2announcements: false
 #
 # Only effective when monitor aggregation is set to "medium" or higher.
 # cilium_monitor_aggregation_flags: "all"
-# Kube Proxy Replacement mode (strict/partial)
-# cilium_kube_proxy_replacement: partial
+# Kube Proxy Replacement mode (true/false)
+# cilium_kube_proxy_replacement: false

 # If upgrading from Cilium < 1.5, you may want to override some of these options
 # to prevent service disruptions. See also:
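A cluster that previously set one of the old string modes would switch to the boolean form in its group_vars, e.g. (illustrative override):

```yaml
cilium_kube_proxy_replacement: true
```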
@@ -1,5 +1,5 @@
-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:jammy-20230308
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
@@ -27,14 +27,14 @@ RUN apt update -q \
     ca-certificates \
     curl \
     gnupg2 \
     software-properties-common \
     unzip \
     libvirt-clients \
     qemu-utils \
     qemu-kvm \
     dnsmasq \
-    && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-    && add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
+    && curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+       $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | tee /etc/apt/sources.list.d/docker.list \
     && apt update -q \
     && apt install --no-install-recommends -yq docker-ce \
     && apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
@@ -44,9 +44,8 @@ ADD ./requirements.txt /kubespray/requirements.txt
 ADD ./tests/requirements.txt /kubespray/tests/requirements.txt

 RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
-    && pip install --no-compile --no-cache-dir pip -U \
-    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
-    && pip install --no-compile --no-cache-dir -r requirements.txt \
+    && pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
+    && pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
     && curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
     && echo $(curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl \
@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
     && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
     && vagrant plugin install vagrant-libvirt \
     # Install Kubernetes collections
-    && pip install --no-compile --no-cache-dir kubernetes \
+    && pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
     && ansible-galaxy collection install kubernetes.core
@@ -1,7 +1,7 @@
 ansible==10.7.0
 # Needed for community.crypto module
-cryptography==46.0.3
+cryptography==46.0.4
 # Needed for jinja2 json_query templating
-jmespath==1.0.1
+jmespath==1.1.0
 # Needed for ansible.utils.ipaddr
 netaddr==1.3.0
@@ -55,7 +55,7 @@
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
   retries: 4
-  delay: "{{ retry_stagger | d(3) }}"
+  delay: "{{ retry_stagger }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
   environment: "{{ proxy_env }}"
   when: ansible_pkg_mgr == 'apt'
@@ -128,7 +128,7 @@
   register: docker_task_result
   until: docker_task_result is succeeded
   retries: 4
-  delay: "{{ retry_stagger | d(3) }}"
+  delay: "{{ retry_stagger }}"
   notify: Restart docker
   when:
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
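With the inline `| d(3)` fallback gone, `retry_stagger` is expected to come from role defaults or the inventory; overriding it is unchanged, e.g. (illustrative value):

```yaml
retry_stagger: 5
```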
@@ -5,8 +5,7 @@
     group: "{{ etcd_cert_group }}"
     state: directory
     owner: "{{ etcd_owner }}"
-    mode: "{{ etcd_cert_dir_mode }}"
-    recurse: true
+    mode: "0700"

 - name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
   file:
@@ -145,15 +144,6 @@
     - ('k8s_cluster' in group_names) and
       sync_certs | default(false) and inventory_hostname not in groups['etcd']

-- name: Gen_certs | check certificate permissions
-  file:
-    path: "{{ etcd_cert_dir }}"
-    group: "{{ etcd_cert_group }}"
-    state: directory
-    owner: "{{ etcd_owner }}"
-    mode: "{{ etcd_cert_dir_mode }}"
-    recurse: true
-
 # This is a hack around the fact kubeadm expect the same certs path on all kube_control_plane
 # TODO: fix certs generation to have the same file everywhere
 # OR work with kubeadm on node-specific config
@@ -18,7 +18,6 @@ etcd_backup_retention_count: -1
 force_etcd_cert_refresh: true
 etcd_config_dir: /etc/ssl/etcd
 etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
-etcd_cert_dir_mode: "0700"
 etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
@@ -8,3 +8,4 @@ local_path_provisioner_is_default_storageclass: "true"
 local_path_provisioner_debug: false
 local_path_provisioner_helper_image_repo: "busybox"
 local_path_provisioner_helper_image_tag: "latest"
+local_path_provisioner_resources: {}
@@ -35,6 +35,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+{% if local_path_provisioner_resources %}
+        resources:
+          {{ local_path_provisioner_resources | to_nice_yaml | indent(10) | trim }}
+{% endif %}
       volumes:
         - name: config-volume
           configMap:
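Since the new default is an empty dict, the `resources:` stanza is only rendered when the variable is set; an override could look like this (illustrative values, standard Kubernetes resource syntax):

```yaml
local_path_provisioner_resources:
  requests:
    cpu: 100m
    memory: 64Mi
  limits:
    cpu: 200m
    memory: 128Mi
```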
@@ -2,6 +2,9 @@
 # disable upgrade cluster
 upgrade_cluster_setup: false

+# Number of retries (with 5 seconds interval) to check that new control plane nodes
+# are in Ready condition after joining
+control_plane_node_become_ready_tries: 24
 # By default the external API listens on all interfaces, this can be changed to
 # listen on a specific address/interface.
 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
@@ -98,3 +98,18 @@
   when:
     - inventory_hostname != first_kube_control_plane
     - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
+
+- name: Wait for new control plane nodes to be Ready
+  when: kubeadm_already_run.stat.exists
+  run_once: true
+  command: >
+    {{ kubectl }} get nodes --selector node-role.kubernetes.io/control-plane
+    -o jsonpath-as-json="{.items[*].status.conditions[?(@.type == 'Ready')]}"
+  register: control_plane_node_ready_conditions
+  retries: "{{ control_plane_node_become_ready_tries }}"
+  delay: 5
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  until: >
+    control_plane_node_ready_conditions.stdout
+    | from_json | selectattr('status', '==', 'True')
+    | length == (groups['kube_control_plane'] | length)
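The `until` expression parses the command's stdout as JSON and counts Ready conditions; against a healthy two-node control plane the query returns something like (illustrative, fields abbreviated):

```ShellSession
$ kubectl get nodes --selector node-role.kubernetes.io/control-plane \
    -o jsonpath-as-json="{.items[*].status.conditions[?(@.type == 'Ready')]}"
[
    {"type": "Ready", "status": "True", "reason": "KubeletReady"},
    {"type": "Ready", "status": "True", "reason": "KubeletReady"}
]
```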
roles/kubernetes/node-taint/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
+---
+node_taints: []
@@ -14,13 +14,13 @@

 - name: Populate inventory node taint
   set_fact:
-    inventory_node_taints: "{{ inventory_node_taints + ['%s' | format(item)] }}"
-  loop: "{{ node_taints | d([]) }}"
+    inventory_node_taints: "{{ inventory_node_taints + node_taints }}"
   when:
     - node_taints is defined
     - node_taints is not string
     - node_taints is not mapping
     - node_taints is iterable

 - debug: # noqa name[missing]
     var: role_node_taints
 - debug: # noqa name[missing]
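After this change the host variable is concatenated as-is, so it must already be a list of taint strings; a hypothetical inventory override:

```yaml
# host_vars/node1.yml (illustrative; taint strings use the key=value:Effect convention)
node_taints:
  - "dedicated=infra:NoSchedule"
```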
@@ -440,6 +440,7 @@ cni_binary_checksums:
     1.6.0: sha256:d8d4bd74247407c8c73de057bc00adac28bb1ed2d2ee60a9dda278e3b398bcc2
 calicoctl_binary_checksums:
   arm64:
+    3.30.6: sha256:47ecc00bdd797f82e4bac0ff3904c3a5143ba2d61e8ae1cbbce286ca76d3790a
     3.30.5: sha256:7611343e7a56e770b95e2bb882dda787efbbd4331b1dd6316ff8ea189238dfaa
     3.30.4: sha256:b21fbbc55b6f5d50c1c0faae714242cae3e013185cb8e26ce56981bd10da260d
     3.30.3: sha256:2ae0474b88a6042e5489d7410d2669a9d443c9d5c51e2bdc8ebe4d6dd98f2475
@@ -461,6 +462,7 @@ calicoctl_binary_checksums:
     3.28.1: sha256:c062d13534498a427c793a4a9190be4df3cf796a3feb29e4a501e1d6f48daa7c
     3.28.0: sha256:c4ca8563d2a920729116a3a30171c481580c8c447938ce974ce14d7ce25a31bf
   amd64:
+    3.30.6: sha256:2017e19727dca689d8bb73a9d8dff3c6a8ba7d8c75049f99ee207272161b5749
     3.30.5: sha256:6cdfb17b0276f648f4fdb051a5d75617a50b3c328d4cccfc40d087b96c361d80
     3.30.4: sha256:7e2e5e75b25c55683b68eabeb9b00390b1d359e72bf57f7ec2b76bb006fd175f
     3.30.3: sha256:a7d017d1abf6ef5d6e03267187c0dd68c32f5e937b64decd29d003be44fa6b94
@@ -482,6 +484,7 @@ calicoctl_binary_checksums:
     3.28.1: sha256:22ec5727c38dbe19001792b4ca64ac760a6e2985d5c1a231d919dbebe5bca171
     3.28.0: sha256:4ea270699e67ca29e5533ddb0a68d370cb0005475796c7e841f83047da6297b6
   ppc64le:
+    3.30.6: sha256:9a9c368499b1e3d08418dfbb566379483e15c50d08dd1bcaf6148c115d82ed36
     3.30.5: sha256:5b6de49da1af2633549bff5e8f4d8a573a175b65c47c29d327ef6a0760d39a93
     3.30.4: sha256:8fc8ef492d463e184e714bc6d31b05f9066c8af3445928efef233850f036bb92
     3.30.3: sha256:ccd13ced62baf633fb4347fbe6c9fdc0d3b1b7deb1794c83c015507a0cb8238e
@@ -579,6 +582,7 @@ ciliumcli_binary_checksums:
     0.16.0: sha256:da98675f961833d4ffd68b1046d907b228a7d394ded2abd70a50b20eaca171c4
 calico_crds_archive_checksums:
   no_arch:
+    3.30.6: sha256:d61aa5bcddfc78b0094acd54e0358009fa79e1cbe6d8c23bdacb34ff7a2c6c82
     3.30.5: sha256:3a38f91596c204b43c70f642a3e686d8c3fbfdfa5caa7824b716aa2f4a4e568b
     3.30.4: sha256:a9398f6de6cce8f683e0ad649a21f3d3b8bb5fe4cd26e7b26b33b9a8c740274f
     3.30.3: sha256:36c50905b9b62a78638bcfb9d1c4faf1efa08e2013265dcd694ec4e370b78dd7
@@ -658,6 +662,7 @@ helm_archive_checksums:
     3.16.0: sha256:d13a4b87b31a5b50c8d93dd9988dfb312a61e56504102f466a4004e5a3ab8e9e
 cri_dockerd_archive_checksums:
   arm64:
+    0.3.23: sha256:a78037d2d2e9c52c48372a5cbba7b94b1c57be5759449beef29cfe03cbe6f14b
     0.3.22: sha256:3260b214c9b12dbf0cbf4d60410c45aacfc31ba52aa7b74164135968e8950cb6
     0.3.21: sha256:35de6b1e8eba11d8ba6d71fa7499cb3d610a1e7b866c9d43b7f87029e3a769cd
     0.3.20: sha256:e6b4661c51c832ee1cbbb75d1c8b086fa803acc153d400454c3b8cf324547d89
@@ -676,6 +681,7 @@ cri_dockerd_archive_checksums:
     0.3.6: sha256:793b8f57cecf734c47bface10387a8e90994c570b516cb755900f21ebd0a663b
     0.3.5: sha256:c20014dc5a71e6991a3bd7e1667c744e3807b5675b1724b26bb7c70093582cfe
   amd64:
+    0.3.23: sha256:c7fe5db7f9396186193b58ded0e62a31eca7b3c58ad8691d57017986f96482ee
     0.3.22: sha256:6621a96a885c82844d12318de00f510eae3459871cf1ad47317f38dd242f9a03
     0.3.21: sha256:6c35838bc4b1aef74f9113670e114ca729a5f295f9457b226791e18e86e91698
     0.3.20: sha256:2ce46d6bbd7f6a7e06e211836c201fdc2311111913eccc63a03f6ef4fe1958fc
@@ -1,44 +0,0 @@
---
dependencies:
  - role: network_plugin/cni
    when: kube_network_plugin != 'none'

  - role: network_plugin/cilium
    when: kube_network_plugin == 'cilium' or cilium_deploy_additionally
    tags:
      - cilium

  - role: network_plugin/calico
    when: kube_network_plugin == 'calico'
    tags:
      - calico

  - role: network_plugin/flannel
    when: kube_network_plugin == 'flannel'
    tags:
      - flannel

  - role: network_plugin/macvlan
    when: kube_network_plugin == 'macvlan'
    tags:
      - macvlan

  - role: network_plugin/kube-ovn
    when: kube_network_plugin == 'kube-ovn'
    tags:
      - kube-ovn

  - role: network_plugin/kube-router
    when: kube_network_plugin == 'kube-router'
    tags:
      - kube-router

  - role: network_plugin/custom_cni
    when: kube_network_plugin == 'custom_cni'
    tags:
      - custom_cni

  - role: network_plugin/multus
    when: kube_network_plugin_multus
    tags:
      - multus
roles/network_plugin/tasks/main.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
---
- name: Container Network Interface plugin
  include_role:
    name: network_plugin/cni
  when: kube_network_plugin != 'none'

- name: Network plugin
  include_role:
    name: "network_plugin/{{ kube_network_plugin }}"
    apply:
      tags:
        - "{{ kube_network_plugin }}"
        - network
  when:
    - kube_network_plugin != 'none'
  tags:
    - cilium
    - calico
    - flannel
    - macvlan
    - kube-ovn
    - kube-router
    - custom_cni

- name: Cilium additional
  include_role:
    name: network_plugin/cilium
    apply:
      tags:
        - cilium
        - network
  when:
    - kube_network_plugin != 'cilium'
    - cilium_deploy_additionally
  tags:
    - cilium

- name: Multus
  include_role:
    name: network_plugin/multus
    apply:
      tags:
        - multus
        - network
  when: kube_network_plugin_multus
  tags:
    - multus
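Dynamic includes do not inherit CLI tags on their own, hence the paired outer `tags` (so the include task matches `--tags`) and `apply` (so the included tasks are tagged too); an invocation like the following still reaches the plugin tasks (inventory path illustrative):

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml --tags network
```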
@@ -20,7 +20,7 @@
   when:
     - not ignore_assert_errors

-- name: Warn if `kube_network_plugin` is `none
+- name: Warn if `kube_network_plugin` is `none`
   debug:
     msg: |
       "WARNING! => `kube_network_plugin` is set to `none`. The network configuration will be skipped.
@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1

-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b

 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
@@ -29,7 +29,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

 RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
     --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
     && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
@@ -116,4 +116,9 @@ infos = {
         "graphql_id": "R_kgDODQ6RZw",
         "binary": True,
     },
+    "prometheus_operator_crds": {
+        "url": "https://github.com/prometheus-operator/prometheus-operator/releases/download/v{version}/stripped-down-crds.yaml",
+        "graphql_id": "R_kgDOBBxPpw",
+        "binary": True,
+    },
 }
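The `url` field keeps a `{version}` placeholder; presumably the script fills it with `str.format` when fetching a given release, along these lines (a sketch, not the script's actual code):

```python
# The version value here is hypothetical, purely to show the substitution
url = infos["prometheus_operator_crds"]["url"].format(version="0.79.2")
```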
@@ -1,5 +1,5 @@
-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:jammy-20230308
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
@@ -27,14 +27,14 @@ RUN apt update -q \
     ca-certificates \
     curl \
     gnupg2 \
     software-properties-common \
     unzip \
     libvirt-clients \
     qemu-utils \
     qemu-kvm \
     dnsmasq \
-    && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-    && add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
+    && curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+       $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | tee /etc/apt/sources.list.d/docker.list \
     && apt update -q \
     && apt install --no-install-recommends -yq docker-ce \
     && apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
@@ -44,9 +44,8 @@ ADD ./requirements.txt /kubespray/requirements.txt
 ADD ./tests/requirements.txt /kubespray/tests/requirements.txt

 RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
-    && pip install --no-compile --no-cache-dir pip -U \
-    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
-    && pip install --no-compile --no-cache-dir -r requirements.txt \
+    && pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
+    && pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
     && curl -L https://dl.k8s.io/release/v{{ kube_version }}/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
     && echo $(curl -L https://dl.k8s.io/release/v{{ kube_version }}/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl \
@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
     && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
     && vagrant plugin install vagrant-libvirt \
     # Install Kubernetes collections
-    && pip install --no-compile --no-cache-dir kubernetes \
+    && pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
     && ansible-galaxy collection install kubernetes.core
@@ -18,7 +18,7 @@ if [ "${UPGRADE_TEST}" != "false" ]; then
|
||||
# Checkout the current tests/ directory ; even when testing old version,
|
||||
# we want the up-to-date test setup/provisionning
|
||||
git checkout "${CI_COMMIT_SHA}" -- tests/
|
||||
pip install --no-compile --no-cache-dir -r requirements.txt
|
||||
pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt
|
||||
fi
|
||||
|
||||
export ANSIBLE_BECOME=true
|
||||
@@ -58,7 +58,7 @@ fi
|
||||
if [ "${UPGRADE_TEST}" != "false" ]; then
|
||||
git checkout "${CI_COMMIT_SHA}"
|
||||
|
||||
pip install --no-compile --no-cache-dir -r requirements.txt
|
||||
pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt
|
||||
|
||||
case "${UPGRADE_TEST}" in
|
||||
"basic")
|
||||
|
||||