Merge pull request #12037 from VannTen/ci/convert_vagrant_to_kubevirt_2

CI: convert remaining vagrant jobs (except IPv6) to kubevirt + cleanups
Authored by Kubernetes Prow Robot on 2025-04-09 01:16:42 -07:00; committed by GitHub.
101 changed files with 269 additions and 617 deletions

.ansible-lint

@@ -39,5 +39,7 @@ exclude_paths:
- .github
- .ansible
- .cache
- .gitlab-ci.yml
- .gitlab-ci
mock_modules:
- gluster.gluster.gluster_volume

.gitlab-ci.yml

@@ -31,12 +31,12 @@ variables:
ANSIBLE_VERBOSITY: 2
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
TERRAFORM_VERSION: 1.3.7
TF_VERSION: 1.3.7
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
before_script:
- ./tests/scripts/rebase.sh
- mkdir -p /.ssh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY
.job: &job
tags:
@@ -59,18 +59,6 @@ before_script:
- pre-commit # lint
- vagrant-validate # lint
.testcases: &testcases
extends: .job-moderated
interruptible: true
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh
# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
@@ -102,6 +90,6 @@ include:
- .gitlab-ci/build.yml
- .gitlab-ci/lint.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/kubevirt.yml
- .gitlab-ci/vagrant.yml
- .gitlab-ci/molecule.yml

.gitlab-ci/build.yml

@@ -1,5 +1,5 @@
---
.build-container:
pipeline-image:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
@@ -11,23 +11,19 @@
name: gcr.io/kaniko-project/executor:debug
entrypoint: ['']
variables:
TAG: $CI_COMMIT_SHORT_SHA
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
# TODO: remove the override
# currently rebase.sh depends on bash (not available in the kaniko image);
# once we have a simpler rebase (which should be easy if the target branch ref
# is available as a variable), we'll hopefully be able to rebase here as well
before_script: []
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--context $CI_PROJECT_DIR
--dockerfile $CI_PROJECT_DIR/pipeline.Dockerfile
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE
--log-timestamp=true
pipeline-image:
extends: .build-container
variables:
DOCKERFILE: pipeline.Dockerfile
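
For reference, the config.json one-liner in the script above only inlines the registry credentials so kaniko can push to $CI_REGISTRY. A minimal Python sketch of the same transformation, with placeholder values standing in for the real CI variables:

import base64
import json

# Placeholders for $CI_REGISTRY, $CI_REGISTRY_USER and $CI_REGISTRY_PASSWORD.
registry = "registry.example.com"
user, password = "ci-user", "ci-token"

# Same JSON shape as what the echo writes to /kaniko/.docker/config.json.
auth = base64.b64encode(f"{user}:{password}".encode()).decode()
print(json.dumps({"auths": {registry: {"auth": auth}}}))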

.gitlab-ci/kubevirt.yml (new file)

@@ -0,0 +1,147 @@
---
.kubevirt:
extends: .job-moderated
interruptible: true
script:
- ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-c local -e @"tests/files/${TESTCASE}.yml"
- ./tests/scripts/testcases_run.sh
variables:
ANSIBLE_TIMEOUT: "120"
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized
# TODO: generate testcase matrices from the files in tests/files/
# (a sketch of this appears after this file's diff)
# this is needed to avoid PR rebasing when a job is added or removed in the target branch
# (currently, a removed job in the target branch breaks the tests, because the
# pipeline definition is parsed by GitLab before the rebase.sh script runs)
# CI template for PRs
pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- almalinux8-calico
- almalinux9-crio
- almalinux9-kube-ovn
- debian11-calico-collection
- debian11-macvlan
- debian12-cilium
- fedora39-kube-router
# FIXME: this test is broken (perma-failing)
- openeuler24-calico
- opensuse15-6-calico
- rockylinux8-calico
- rockylinux9-cilium
- ubuntu20-calico-all-in-one-hardening
- ubuntu20-cilium-sep
- ubuntu20-flannel-collection
- ubuntu20-kube-router-sep
- ubuntu20-kube-router-svc-proxy
- ubuntu22-calico-all-in-one
- ubuntu22-calico-all-in-one-upgrade
- ubuntu24-calico-etcd-datastore
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .kubevirt
variables:
TESTCASE: ubuntu20-calico-all-in-one
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
pr_full:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico-ha-ebpf
- almalinux9-calico-nodelocaldns-secondary
- debian11-custom-cni
- debian11-kubelet-csr-approver
- debian12-custom-cni-helm
- fedora39-calico-swap-selinux
- fedora39-crio
- ubuntu20-all-in-one-docker
- ubuntu20-calico-ha-wireguard
- ubuntu20-flannel-ha
- ubuntu20-flannel-ha-once
# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
manual:
extends: pr_full
parallel:
matrix:
- TESTCASE:
- amazon-linux-2-all-in-one
rules:
- when: manual
allow_failure: true
pr_extended:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico
- almalinux9-calico-remove-node
- almalinux9-docker
- debian11-docker
- debian12-calico
- debian12-docker
- opensuse15-6-docker-cilium
- rockylinux9-calico
- ubuntu20-calico-etcd-kubeadm
- ubuntu20-flannel
- ubuntu22-all-in-one-docker
- ubuntu24-all-in-one-docker
- ubuntu24-calico-all-in-one
# Enabled when PERIODIC_CI_ENABLED var is set
periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- debian11-calico-upgrade
- debian11-calico-upgrade-once
- debian12-cilium-svc-proxy
- fedora39-calico-selinux
- fedora40-docker-calico
- ubuntu20-calico-etcd-kubeadm-upgrade-ha
- ubuntu20-calico-ha-recover
- ubuntu20-calico-ha-recover-noquorum
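
The TODO near the top of this file (generating the testcase matrix from the files in tests/files/) could be implemented with a GitLab dynamic child pipeline. A minimal sketch, assuming PyYAML is available and keeping only the simple pr job shape; the pr/pr_extended/pr_full split and the label rules above would still need to be encoded:

from pathlib import Path

import yaml  # PyYAML

def generate_pipeline(files_dir="tests/files"):
    # One TESTCASE entry per testcase definition file.
    testcases = sorted(p.stem for p in Path(files_dir).glob("*.yml"))
    pipeline = {
        "pr": {
            "extends": ".kubevirt",
            "stage": "deploy-part1",
            "parallel": {"matrix": [{"TESTCASE": testcases}]},
        }
    }
    return yaml.safe_dump(pipeline, sort_keys=False)

if __name__ == "__main__":
    print(generate_pipeline())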

.gitlab-ci/molecule.yml

@@ -8,8 +8,6 @@
needs:
- pipeline-image
# - ci-not-authorized
before_script:
- ./tests/scripts/rebase.sh
script:
- ./tests/scripts/molecule_run.sh
after_script:

.gitlab-ci/packet.yml (deleted)

@@ -1,257 +0,0 @@
---
.packet:
extends: .testcases
variables:
ANSIBLE_TIMEOUT: "120"
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized
# CI template for PRs
.packet_pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .packet
## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one
.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .packet
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr_short
variables:
RESET_CHECK: "true"
# ### PR JOBS PART2
packet_ubuntu20-crio:
extends: .packet_pr_manual
packet_ubuntu22-calico-all-in-one:
extends: .packet_pr
packet_ubuntu22-calico-all-in-one-upgrade:
extends: .packet_pr
variables:
UPGRADE_TEST: graceful
packet_ubuntu24-calico-etcd-datastore:
extends: .packet_pr
packet_almalinux9-crio:
extends: .packet_pr
packet_almalinux9-kube-ovn:
extends: .packet_pr
packet_debian11-calico-collection:
extends: .packet_pr
packet_debian11-macvlan:
extends: .packet_pr
packet_debian12-cilium:
extends: .packet_pr
packet_almalinux8-calico:
extends: .packet_pr
packet_rockylinux8-calico:
extends: .packet_pr
packet_rockylinux9-cilium:
extends: .packet_pr
variables:
RESET_CHECK: "true"
# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
packet_amazon-linux-2-all-in-one:
extends: .packet_pr_manual
rules:
- when: manual
allow_failure: true
packet_opensuse15-6-calico:
extends: .packet_pr
packet_ubuntu20-cilium-sep:
extends: .packet_pr
packet_openeuler24-calico:
extends: .packet_pr
packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr
## Extended
packet_debian11-docker:
extends: .packet_pr_extended
packet_debian12-docker:
extends: .packet_pr_extended
packet_debian12-calico:
extends: .packet_pr_extended
packet_almalinux9-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_rockylinux9-calico:
extends: .packet_pr_extended
packet_almalinux9-calico:
extends: .packet_pr_extended
packet_almalinux9-docker:
extends: .packet_pr_extended
packet_opensuse15-6-docker-cilium:
extends: .packet_pr_extended
packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended
packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended
packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended
packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended
# ### MANUAL JOBS
packet_fedora39-crio:
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual
packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual
packet_fedora39-calico-swap-selinux:
extends: .packet_pr_manual
packet_almalinux9-calico-ha-ebpf:
extends: .packet_pr_manual
packet_almalinux9-calico-nodelocaldns-secondary:
extends: .packet_pr_manual
packet_debian11-custom-cni:
extends: .packet_pr_manual
packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual
packet_debian12-custom-cni-helm:
extends: .packet_pr_manual
packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual
# PERIODIC
packet_fedora40-docker-calico:
stage: deploy-extended
extends: .packet_periodic
variables:
RESET_CHECK: "true"
packet_fedora39-calico-selinux:
stage: deploy-extended
extends: .packet_periodic
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: basic
packet_debian11-calico-upgrade-once:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful
packet_ubuntu20-calico-ha-recover:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful
packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic

.gitlab-ci/terraform.yml

@@ -5,28 +5,21 @@
needs:
- ci-not-authorized
- pipeline-image
variables:
TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub"
TF_VAR_ssh_private_key_path: $ANSIBLE_PRIVATE_KEY_FILE
CLUSTER: $CI_COMMIT_REF_NAME
TERRAFORM_STATE_ROOT: $CI_PROJECT_DIR
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
- ln -s contrib/terraform/$PROVIDER/hosts
- ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts
- terraform -chdir="contrib/terraform/$PROVIDER" init
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
.terraform_validate:
terraform_validate:
extends: .terraform_install
tags: [ffci]
only: ['master', /^pr-.*$/]
@@ -36,6 +29,17 @@
stage: test
needs:
- pipeline-image
parallel:
matrix:
- PROVIDER:
- openstack
- equinix
- aws
- exoscale
- hetzner
- vsphere
- upcloud
- nifcloud
.terraform_apply:
extends: .terraform_install
@@ -43,99 +47,24 @@
stage: deploy-extended
when: manual
only: [/^pr-.*$/]
artifacts:
when: always
paths:
- cluster-dump/
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
ANSIBLE_REMOTE_USER: ubuntu # the openstack terraform module does not handle custom user correctly
ANSIBLE_SSH_RETRIES: 15
TF_VAR_ssh_user: $ANSIBLE_REMOTE_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
- ssh-keygen -N '' -f $ANSIBLE_PRIVATE_KEY_FILE -t rsa
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
- terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-equinix:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: equinix
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-aws:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: exoscale
tf-validate-hetzner:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: hetzner
tf-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-nifcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: nifcloud
# tf-packet-ubuntu20-default:
# extends: .terraform_apply
# variables:
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_metro: am
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_20_04
.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
- terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve
# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
@@ -169,11 +98,8 @@ tf-elastx_ubuntu20-calico:
allow_failure: true
variables:
<<: *elastx_variables
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -194,46 +120,3 @@ tf-elastx_ubuntu20-calico:
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-20.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# OVH voucher expired, commenting job until things are sorted out
# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# environment: ovh
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py
# tf-ovh_ubuntu20-calico:
# extends: .terraform_apply
# when: on_success
# environment: ovh
# variables:
# <<: *ovh_variables
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 20.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

.gitlab-ci/vagrant.yml

@@ -1,13 +1,13 @@
---
.vagrant:
extends: .testcases
vagrant:
extends: .job-moderated
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
@@ -28,54 +28,22 @@
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- vagrant up
- ./tests/scripts/testcases_run.sh
after_script:
- vagrant destroy -f
cache:
key: $CI_JOB_NAME_SLUG
paths:
- .vagrant.d/boxes
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main
vagrant_ubuntu24-calico-dual-stack:
stage: deploy-extended
extends: .vagrant
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
vagrant_ubuntu24-calico-ipv6only-stack:
stage: deploy-extended
extends: .vagrant
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel:
stage: deploy-part1
extends: .vagrant
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel-collection:
stage: deploy-extended
extends: .vagrant
when: manual
vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended
extends: .vagrant
when: manual
# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended
extends: .vagrant
when: manual
vagrant_fedora39-kube-router:
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
parallel:
matrix:
- TESTCASE:
- ubuntu24-calico-dual-stack
- ubuntu24-calico-ipv6only-stack

(a molecule.yml scenario config; full path not shown)

@@ -14,6 +14,6 @@ provisioner:
callbacks_enabled: profile_tasks
timeout: 120
playbooks:
create: ../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

(a molecule.yml scenario config; full path not shown)

@@ -22,6 +22,6 @@ provisioner:
hosts:
bastion-01:
playbooks:
create: ../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

(a molecule.yml scenario config; full path not shown)

@@ -32,6 +32,6 @@ provisioner:
name: foo
comment: My test comment
playbooks:
create: ../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

(a molecule.yml scenario config; full path not shown)

@@ -34,6 +34,6 @@ provisioner:
callbacks_enabled: profile_tasks
timeout: 120
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

(a molecule.yml scenario config; full path not shown)

@@ -26,6 +26,6 @@ provisioner:
all:
become: true
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

(a molecule.yml scenario config; full path not shown)

@@ -42,6 +42,6 @@ provisioner:
callbacks_enabled: profile_tasks
timeout: 120
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
verifier:
name: testinfra

tests/Makefile

@@ -8,7 +8,7 @@ $(ANSIBLE_INVENTORY):
mkdir $@
create-packet: | $(ANSIBLE_INVENTORY)
ansible-playbook cloud_playbooks/create-packet.yml -c local \
ansible-playbook cloud_playbooks/create-kubevirt.yml -c local \
-e @"files/${CI_JOB_NAME}.yml"
delete-packet: ;

(file path not shown)

@@ -1,8 +1,4 @@
---
- name: Include custom vars for ci job
include_vars: "../files/{{ lookup('ansible.builtin.env', 'CI_JOB_NAME') }}.yml"
when: molecule_yml is not defined
- name: Generate SSH keypair
community.crypto.openssh_keypair:
size: 2048

(file path not shown)

@@ -0,0 +1,2 @@
REMOVE_NODE_CHECK=true
REMOVE_NODE_NAME=instance-3

(file path not shown)

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

(file path not shown)

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

(file path not shown)

@@ -0,0 +1,7 @@
---
cloud_image: fedora-39
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_node']
kube_network_plugin: "kube-router"
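
Testcase files like this one describe the cluster declaratively: one cluster_layout entry per VM, each listing the inventory groups that VM joins. Purely as an illustration (the instance-N naming is assumed from REMOVE_NODE_NAME=instance-3 above; this is not the actual create-kubevirt.yml logic), the layout expands roughly like this:

# Hypothetical expansion of a cluster_layout into inventory groups.
layout = [
    {"node_groups": ["kube_control_plane", "etcd", "kube_node"]},
    {"node_groups": ["kube_node"]},
]

groups = {}
for i, node in enumerate(layout, start=1):
    name = f"instance-{i}"  # naming assumed, not taken from the playbook
    for group in node["node_groups"]:
        groups.setdefault(group, []).append(name)

print(groups)
# {'kube_control_plane': ['instance-1'], 'etcd': ['instance-1'],
#  'kube_node': ['instance-1', 'instance-2']}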

(file path not shown)

@@ -0,0 +1 @@
RESET_CHECK=true

(file path not shown)

@@ -0,0 +1 @@
RESET_CHECK=true

(file path not shown)

@@ -0,0 +1 @@
RESET_CHECK=true

(file path not shown)

@@ -0,0 +1 @@
UPGRADE_TEST=basic

(file path not shown)

@@ -0,0 +1,2 @@
RECOVER_CONTROL_PLANE_TEST=true
RECOVER_CONTROL_PLANE_TEST_GROUPS="etcd[2:]:kube_control_plane[1:]"

(file path not shown)

@@ -0,0 +1,2 @@
RECOVER_CONTROL_PLANE_TEST=true
RECOVER_CONTROL_PLANE_TEST_GROUPS="etcd[1:]:kube_control_plane[1:]"

(file path not shown)

@@ -0,0 +1,8 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['etcd', 'kube_node']
kube_network_plugin: flannel

(file path not shown; the one-line content below suggests a symlink to ubuntu20-flannel-collection.yml)

@@ -0,0 +1 @@
ubuntu20-flannel-collection.yml

(file path not shown)

@@ -0,0 +1,7 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_node']
kube_network_plugin: "kube-router"

(file path not shown)

@@ -0,0 +1,10 @@
---
cloud_image: ubuntu-2004
cluster_layout:
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['kube_control_plane', 'etcd', 'kube_node']
- node_groups: ['etcd', 'kube_node']
kube_network_plugin: "kube-router"
kube_router_run_service_proxy: true

(file path not shown)

@@ -0,0 +1 @@
UPGRADE_TEST=graceful

(deleted Vagrant .rb config; path not shown)

@@ -1,15 +0,0 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "fedora39"
$control_plane_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

(deleted file; path not shown)

@@ -1,6 +0,0 @@
---
# Instance settings
cloud_image: fedora-39
# Kubespray settings
kube_network_plugin: kube-router

(deleted Vagrant .rb config; path not shown)

@@ -1,9 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

(deleted file; path not shown)

@@ -1,3 +0,0 @@
---
# Kubespray settings
kube_network_plugin: flannel

(deleted Vagrant .rb config; path not shown)

@@ -1,8 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

(deleted file; path not shown)

@@ -1,4 +0,0 @@
---
# Kubespray settings
kube_network_plugin: flannel
ansible_ssh_private_key: .vagrant.d/insecure_private_key

(deleted Vagrant .rb config; path not shown)

@@ -1,15 +0,0 @@
$num_instances = 2
$vm_memory ||= 2048
$os = "ubuntu2004"
$control_plane_instances = 1
$etcd_instances = 1
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

(deleted file; path not shown)

@@ -1,8 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router

(deleted Vagrant .rb config; path not shown)

@@ -1,10 +0,0 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "kube-router"

(deleted file; path not shown)

@@ -1,10 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-2004
mode: separate
# Kubespray settings
bootstrap_os: ubuntu
kube_network_plugin: kube-router
kube_router_run_service_proxy: true

(deleted script; path not shown)

@@ -1,5 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1

(deleted script; path not shown)

@@ -1,5 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd ..
terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve

(Python script; full path not shown)

@@ -90,7 +90,7 @@ for f in files:
container_manager = y.get('container_manager', 'containerd')
network_plugin = y.get('kube_network_plugin', 'calico')
x = re.match(r"^[a-z-]+_([a-z0-9]+).*", f.name)
operating_system = x.group(1)
x = re.match(r"^([a-z-]+_)?([a-z0-9]+).*", f.name)
operating_system = x.group(2)
data.set(container_manager=container_manager, network_plugin=network_plugin, os=operating_system)
print(data.jinja(), file=open(args.output, 'w'))
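
The prefix group is now optional, so the script extracts the operating system token from both the old packet_/vagrant_ job names and the new bare TESTCASE names:

import re

# Old file names carried a packet_/vagrant_ prefix; new TESTCASE names do not.
for name in ["packet_ubuntu20-calico-all-in-one", "ubuntu20-calico-all-in-one"]:
    x = re.match(r"^([a-z-]+_)?([a-z0-9]+).*", name)
    print(name, "->", x.group(2))  # the OS token is "ubuntu20" in both cases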

tests/scripts/testcases_cleanup.sh (deleted)

@@ -1,4 +0,0 @@
#!/bin/bash
set -euxo pipefail
make -C tests delete-${CI_PLATFORM} -s

tests/scripts/testcases_prepare.sh (deleted)

@@ -1,7 +0,0 @@
#!/bin/bash
set -euxo pipefail
mkdir -p /.ssh
mkdir -p cluster-dump
mkdir -p $HOME/.ssh
ansible-playbook --version

Some files were not shown because too many files have changed in this diff.