Merge pull request #12037 from VannTen/ci/convert_vagrant_to_kubevirt_2

CI: convert remaining vagrant jobs (except IPv6) to kubevirt + cleanups
Kubernetes Prow Robot authored 2025-04-09 01:16:42 -07:00, committed by GitHub
101 changed files with 269 additions and 617 deletions

View File

@@ -8,7 +8,7 @@ $(ANSIBLE_INVENTORY):
 	mkdir $@
 create-packet: | $(ANSIBLE_INVENTORY)
-	ansible-playbook cloud_playbooks/create-packet.yml -c local \
+	ansible-playbook cloud_playbooks/create-kubevirt.yml -c local \
 		-e @"files/${CI_JOB_NAME}.yml"
 delete-packet: ;
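Note: the `create-packet` target keeps its name for compatibility, but it now provisions kubevirt VMs via `create-kubevirt.yml` instead of Packet machines. A minimal sketch of what the rule expands to; the job name below is a hypothetical stand-in, since `CI_JOB_NAME` is normally injected by the CI environment:

    # Sketch of the retargeted rule's expansion (job name is an assumption):
    CI_JOB_NAME=ubuntu20-flannel
    ansible-playbook cloud_playbooks/create-kubevirt.yml -c local \
        -e @"files/${CI_JOB_NAME}.yml"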

View File

@@ -1,8 +1,4 @@
 ---
-- name: Include custom vars for ci job
-  include_vars: "../files/{{ lookup('ansible.builtin.env', 'CI_JOB_NAME') }}.yml"
-  when: molecule_yml is not defined
 - name: Generate SSH keypair
   community.crypto.openssh_keypair:
     size: 2048

View File

@@ -0,0 +1,2 @@
REMOVE_NODE_CHECK=true
REMOVE_NODE_NAME=instance-3
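Note: these new extension-less files under `tests/files/` are shell fragments holding per-testcase environment variables; the run script loads them with `source tests/files/$TESTCASE || true` (see the run-script diff near the end). A minimal stand-alone sketch of that consumption; the testcase name here is an assumption:

    # Hypothetical consumption of the fragment above (testcase name assumed):
    TESTCASE=remove-node
    source "tests/files/${TESTCASE}" || true
    echo "${REMOVE_NODE_CHECK:-unset} ${REMOVE_NODE_NAME:-unset}"  # -> true instance-3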

View File

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

View File

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

View File

@@ -0,0 +1,7 @@
---
cloud_image: fedora-39
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_node']
kube_network_plugin: "kube-router"
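Note: `cluster_layout` declares one VM per list entry, each joining the listed inventory groups — here one combined control-plane/etcd/worker node plus one plain worker. A sketch of how such a layout maps to hosts, assuming the kubevirt playbook names VMs instance-1..n in declaration order (consistent with `REMOVE_NODE_NAME=instance-3` above, but an assumption nonetheless):

    # Expand a layout into host -> groups pairs; sequential naming is assumed.
    layout=("kube_control_plane etcd kube_node" "kube_node")
    i=1
    for groups in "${layout[@]}"; do
        echo "instance-${i}: ${groups}"
        i=$((i + 1))
    done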

View File

@@ -0,0 +1 @@
RESET_CHECK=true

View File

@@ -0,0 +1 @@
RESET_CHECK=true

View File

@@ -0,0 +1 @@
RESET_CHECK=true

View File

@@ -0,0 +1 @@
UPGRADE_TEST=basic

View File

@@ -0,0 +1,2 @@
RECOVER_CONTROL_PLANE_TEST=true
RECOVER_CONTROL_PLANE_TEST_GROUPS="etcd[2:]:kube_control_plane[1:]"

View File

@@ -0,0 +1,2 @@
RECOVER_CONTROL_PLANE_TEST=true
RECOVER_CONTROL_PLANE_TEST_GROUPS="etcd[1:]:kube_control_plane[1:]"
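Note: the `RECOVER_CONTROL_PLANE_TEST_GROUPS` values use Ansible host-pattern slices: `etcd[1:]` selects every host in the `etcd` group except the first, and `:` joins patterns into a union. A hypothetical illustration with `--limit` — the run script's `run_playbook` passes supplemental args through, per the `--limit` comment later in this diff, though the exact flag usage here is an assumption:

    # Illustrative only; playbook name mirrors kubespray's recovery playbook.
    ansible-playbook recover-control-plane.yml \
        --limit "etcd[1:]:kube_control_plane[1:]"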

View File

@@ -0,0 +1,8 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['etcd', 'kube_node']
kube_network_plugin: flannel

View File

@@ -0,0 +1 @@
ubuntu20-flannel-collection.yml

View File

@@ -0,0 +1,7 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_node']
kube_network_plugin: "kube-router"

View File

@@ -0,0 +1,10 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['etcd', 'kube_node']
kube_network_plugin: "kube-router"
kube_router_run_service_proxy: true

View File

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

View File

@@ -1,15 +0,0 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "fedora39"
$control_plane_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -1,6 +0,0 @@
---
# Instance settings
cloud_image: fedora-39
# Kubespray settings
kube_network_plugin: kube-router

View File

@@ -1,9 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

View File

@@ -1,3 +0,0 @@
---
# Kubespray settings
kube_network_plugin: flannel

View File

@@ -1,8 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

View File

@@ -1,4 +0,0 @@
---
# Kubespray settings
kube_network_plugin: flannel
ansible_ssh_private_key: .vagrant.d/insecure_private_key

View File

@@ -1,15 +0,0 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "ubuntu2004"
$control_plane_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -1,8 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router

View File

@@ -1,10 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

View File

@@ -1,10 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router
kube_router_run_service_proxy: true

View File

@@ -1,5 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1

View File

@@ -1,5 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve

View File

@@ -90,7 +90,7 @@ for f in files:
     container_manager = y.get('container_manager', 'containerd')
     network_plugin = y.get('kube_network_plugin', 'calico')
-    x = re.match(r"^[a-z-]+_([a-z0-9]+).*", f.name)
-    operating_system = x.group(1)
+    x = re.match(r"^([a-z-]+_)?([a-z0-9]+).*", f.name)
+    operating_system = x.group(2)
     data.set(container_manager=container_manager, network_plugin=network_plugin, os=operating_system)
     print(data.jinja(), file=open(args.output, 'w'))
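Note: the old pattern required a `<prefix>_` before the OS token, so on the renamed, prefix-less testcase files (such as `ubuntu20-flannel-collection.yml` above) `re.match` returned `None` and `x.group(1)` would raise. The new pattern makes the prefix optional and reads the OS from the second group. The behavior can be checked with bash's ERE engine, which accepts the same expression:

    # Old vs. new pattern against a prefix-less filename from this PR.
    old='^[a-z-]+_([a-z0-9]+).*'
    new='^([a-z-]+_)?([a-z0-9]+).*'
    f='ubuntu20-flannel-collection.yml'
    [[ $f =~ $old ]] && echo "old: ${BASH_REMATCH[1]}" || echo "old: no match"
    [[ $f =~ $new ]] && echo "new: ${BASH_REMATCH[2]}"  # new: ubuntu20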

View File

@@ -1,4 +0,0 @@
#!/bin/bash
set -euxo pipefail
make -C tests delete-${CI_PLATFORM} -s

View File

@@ -1,7 +0,0 @@
#!/bin/bash
set -euxo pipefail
mkdir -p /.ssh
mkdir -p cluster-dump
mkdir -p $HOME/.ssh
ansible-playbook --version

View File

@@ -1,20 +1,17 @@
 #!/bin/bash
 set -euxo pipefail
-echo "CI_JOB_NAME is $CI_JOB_NAME"
-if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
-  if [ "${UPGRADE_TEST}" == "false" ]; then
-    echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
-    exit 1
-  fi
+if [[ -v TESTCASE ]]; then
+  TESTCASE_FILE=files/${TESTCASE}.yml
 else
-  if [ "${UPGRADE_TEST}" != "false" ]; then
-    echo "UPGRADE_TEST!='false', but job name does not contain 'upgrade'"
-    exit 1
-  fi
+  TESTCASE_FILE=common_vars.yml
+  TESTCASE=default
 fi
+echo "TESTCASE is $TESTCASE"
+source tests/files/$TESTCASE || true
 # Check out latest tag if testing upgrade
 if [ "${UPGRADE_TEST}" != "false" ]; then
   git fetch --all && git checkout $(git describe --tags --abbrev=0)
@@ -26,12 +23,10 @@ fi
 export ANSIBLE_BECOME=true
 export ANSIBLE_BECOME_USER=root
 make -C tests create-${CI_PLATFORM} -s
 # Test collection build and install by installing our collection, emptying our repository, adding
 # cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
 # running the same tests as before
-if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
+if [[ "${TESTCASE}" =~ "collection" ]]; then
   # Build and install collection
   ansible-galaxy collection build
   ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
@@ -66,10 +61,9 @@ fi
 run_playbook () {
   playbook=$1
   shift
   # We can set --limit here and still pass it as supplemental args because `--limit` is a 'last one wins' option
   ansible-playbook \
-    -e @tests/common_vars.yml \
-    -e @tests/files/${CI_JOB_NAME}.yml \
+    -e @tests/${TESTCASE_FILE} \
    -e local_release_dir=${PWD}/downloads \
    "$@" \
    ${playbook}
@@ -111,10 +105,10 @@ run_playbook tests/testcases/015_check-nodes-ready.yml
 ## Test that all nodes are Ready
-if [[ ! ( "$CI_JOB_NAME" =~ "macvlan" ) ]]; then
+if [[ ! ( "$TESTCASE" =~ "macvlan" ) ]]; then
   run_playbook tests/testcases/020_check-pods-running.yml
   run_playbook tests/testcases/030_check-network.yml
-  if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
+  if [[ ! ( "$TESTCASE" =~ "hardening" ) ]]; then
    # TODO: Remove this condition once we find an alternative container image
    # to netchecker, which doesn't work in hardened environments.
     run_playbook tests/testcases/040_check-network-adv.yml
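Note: the net effect of these run-script changes is that test selection keys on `TESTCASE` rather than `CI_JOB_NAME`, decoupling behavior from CI job naming and simplifying local reproduction. A hypothetical local run; the script path and all values below are assumptions based on this diff, since in CI they are set per job:

    # All values are illustrative; in CI they come from the job definition.
    export CI_PLATFORM=kubevirt
    export TESTCASE=ubuntu20-flannel
    export UPGRADE_TEST=false   # dereferenced by the script under `set -u`
    bash tests/scripts/testcases_run.sh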