CI: convert vagrant jobs to kubevirt

Vagrant jobs need a big cache, which makes them slow and sometimes
causes them to get stuck completely. Using the kubevirt provisioning
playbook is now significantly faster, so do just that.

Having only one provisioner in CI will also allow us to remove some of
the custom runner executors we use for vagrant and, more generally,
reduce CI maintenance.

Our kubevirt CI platform does not support IPv6 yet, so we keep the
relevant jobs (the dual-stack and IPv6-only Calico tests) in vagrant,
but we'll migrate them as well as soon as possible.
commit 43fceebdd3 (parent 862aec4dc6)
Author: Max Gautier
Date: 2025-03-14 15:15:57 +01:00
21 changed files with 47 additions and 131 deletions


@@ -42,12 +42,17 @@ pr:
   - debian11-calico-collection
   - debian11-macvlan
   - debian12-cilium
+  - fedora39-kube-router
+  # FIXME: this test is broken (perma-failing)
   - openeuler24-calico
   - opensuse15-6-calico
   - rockylinux8-calico
   - rockylinux9-cilium
   - ubuntu20-calico-all-in-one-hardening
   - ubuntu20-cilium-sep
+  - ubuntu20-flannel-collection
+  - ubuntu20-kube-router-sep
+  - ubuntu20-kube-router-svc-proxy
   - ubuntu22-calico-all-in-one
   - ubuntu22-calico-all-in-one-upgrade
   - ubuntu24-calico-etcd-datastore
@@ -120,6 +125,7 @@ pr_extended:
   - opensuse15-6-docker-cilium
   - rockylinux9-calico
   - ubuntu20-calico-etcd-kubeadm
+  - ubuntu20-flannel
   - ubuntu22-all-in-one-docker
   - ubuntu24-all-in-one-docker
   - ubuntu24-calico-all-in-one


@@ -1,13 +1,13 @@
 ---
-.vagrant:
-  extends: .testcases
+vagrant:
+  extends: .job-moderated
   needs:
     - ci-not-authorized
   variables:
     CI_PLATFORM: "vagrant"
     SSH_USER: "vagrant"
     VAGRANT_DEFAULT_PROVIDER: "libvirt"
-    KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
+    KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb
     DOCKER_NAME: vagrant
     VAGRANT_ANSIBLE_TAGS: facts
     VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
@@ -35,47 +35,12 @@
       - .vagrant.d/boxes
       - .cache/pip
     policy: pull-push # TODO: change to "pull" when not on main
-
-vagrant_ubuntu24-calico-dual-stack:
-  stage: deploy-extended
-  extends: .vagrant
-  rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
-      when: on_success
-      allow_failure: false
-
-vagrant_ubuntu24-calico-ipv6only-stack:
-  stage: deploy-extended
-  extends: .vagrant
-  rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
-      when: on_success
-      allow_failure: false
-
-vagrant_ubuntu20-flannel:
-  stage: deploy-part1
-  extends: .vagrant
-  when: on_success
-  allow_failure: false
-
-vagrant_ubuntu20-flannel-collection:
-  stage: deploy-extended
-  extends: .vagrant
-  when: manual
-
-vagrant_ubuntu20-kube-router-sep:
-  stage: deploy-extended
-  extends: .vagrant
-  when: manual
-
-# Service proxy test fails connectivity testing
-vagrant_ubuntu20-kube-router-svc-proxy:
-  stage: deploy-extended
-  extends: .vagrant
-  when: manual
-
-vagrant_fedora39-kube-router:
-  stage: deploy-extended
-  extends: .vagrant
-  when: manual
-# FIXME: this test is broken (perma-failing)
+  parallel:
+    matrix:
+      - TESTCASE:
+          - ubuntu24-calico-dual-stack
+          - ubuntu24-calico-ipv6only-stack
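
For readers unfamiliar with GitLab's parallel:matrix: keyword: it fans the
single vagrant job out into one generated job per TESTCASE value, and each
generated job receives TESTCASE as a variable, which is what the
KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb change above relies on.
A rough hand-written equivalent of the matrix (a sketch only; GitLab derives
the generated jobs and their "vagrant: [<value>]" names internally):

"vagrant: [ubuntu24-calico-dual-stack]":
  extends: vagrant
  variables:
    TESTCASE: ubuntu24-calico-dual-stack

"vagrant: [ubuntu24-calico-ipv6only-stack]":
  extends: vagrant
  variables:
    TESTCASE: ubuntu24-calico-ipv6only-stack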


@@ -0,0 +1,7 @@
+---
+cloud_image: fedora-39
+cluster_layout:
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['kube_node']
+kube_network_plugin: "kube-router"
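
Each cluster_layout entry appears to describe one VM and the Ansible
inventory groups it joins, so this testcase provisions a two-node cluster:
one combined control-plane/etcd/worker node plus one extra worker. Assuming
the kubevirt provisioning playbook renders the layout into a standard
Ansible inventory (node names here are illustrative, not taken from the
playbook), the result would look roughly like:

all:
  children:
    kube_control_plane:
      hosts:
        node-1:
    etcd:
      hosts:
        node-1:
    kube_node:
      hosts:
        node-1:
        node-2: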


@@ -0,0 +1,8 @@
+---
+cloud_image: ubuntu-2004
+cluster_layout:
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['etcd', 'kube_node']
+kube_network_plugin: flannel


@@ -0,0 +1 @@
+ubuntu20-flannel-collection.yml
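
This one-line entry is how the diff view renders a new symlink: the
ubuntu20-flannel testcase file (presumably tests/files/ubuntu20-flannel.yml,
judging from the job names above) simply points at
ubuntu20-flannel-collection.yml, so both flannel testcases share one config.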


@@ -0,0 +1,7 @@
+---
+cloud_image: ubuntu-2004
+cluster_layout:
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['kube_node']
+kube_network_plugin: "kube-router"


@@ -0,0 +1,10 @@
+---
+cloud_image: ubuntu-2004
+cluster_layout:
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['kube_control_plane', 'etcd', 'kube_node']
+  - node_groups: ['etcd', 'kube_node']
+kube_network_plugin: "kube-router"
+kube_router_run_service_proxy: true
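
kube_router_run_service_proxy: true enables kube-router's built-in service
proxy, i.e. kube-router handles Kubernetes Services itself instead of
kube-proxy; it defaults to false in Kubespray, which is presumably why this
testcase (the one flagged "Service proxy test fails connectivity testing" in
the removed vagrant jobs above) has to set it explicitly.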


@@ -1,15 +0,0 @@
-$num_instances = 2
-$vm_memory ||= 2048
-$os = "fedora39"
-$control_plane_instances = 1
-$etcd_instances = 1
-# For CI we are not worried about data persistence across reboot
-$libvirt_volume_cache = "unsafe"
-# Checking for box update can trigger API rate limiting
-# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
-$box_check_update = false
-$network_plugin = "kube-router"


@@ -1,6 +0,0 @@
----
-# Instance settings
-cloud_image: fedora-39
-# Kubespray settings
-kube_network_plugin: kube-router


@@ -1,9 +0,0 @@
-$os = "ubuntu2004"
-# For CI we are not worried about data persistence across reboot
-$libvirt_volume_cache = "unsafe"
-# Checking for box update can trigger API rate limiting
-# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
-$box_check_update = false
-$vm_cpus = 2


@@ -1,3 +0,0 @@
----
-# Kubespray settings
-kube_network_plugin: flannel


@@ -1,8 +0,0 @@
-$os = "ubuntu2004"
-# For CI we are not worried about data persistence across reboot
-$libvirt_volume_cache = "unsafe"
-# Checking for box update can trigger API rate limiting
-# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
-$box_check_update = false
-$vm_cpus = 2


@@ -1,4 +0,0 @@
----
-# Kubespray settings
-kube_network_plugin: flannel
-ansible_ssh_private_key: .vagrant.d/insecure_private_key


@@ -1,15 +0,0 @@
-$num_instances = 2
-$vm_memory ||= 2048
-$os = "ubuntu2004"
-$control_plane_instances = 1
-$etcd_instances = 1
-# For CI we are not worried about data persistence across reboot
-$libvirt_volume_cache = "unsafe"
-# Checking for box update can trigger API rate limiting
-# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
-$box_check_update = false
-$network_plugin = "kube-router"


@@ -1,8 +0,0 @@
----
-# Instance settings
-cloud_image: ubuntu-2004
-mode: separate
-# Kubespray settings
-bootstrap_os: ubuntu
-kube_network_plugin: kube-router


@@ -1,10 +0,0 @@
-$os = "ubuntu2004"
-# For CI we are not worried about data persistence across reboot
-$libvirt_volume_cache = "unsafe"
-# Checking for box update can trigger API rate limiting
-# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
-$box_check_update = false
-$network_plugin = "kube-router"


@@ -1,10 +0,0 @@
----
-# Instance settings
-cloud_image: ubuntu-2004
-mode: separate
-# Kubespray settings
-bootstrap_os: ubuntu
-kube_network_plugin: kube-router
-kube_router_run_service_proxy: true