kubespray/tests/scripts/testcases_run.sh
Max Gautier 97a3776d8e Remove etcd member by peerURLs (#12682)
* Remove etcd member by peerURLs

The way to obtain the IP of a particular member is convoluted and depends
on multiple variables. The match is also textual, and it is not clear
what we are matching against.

It is also broken for etcd members which are not also Kubernetes nodes,
because the "Lookup node IP in kubernetes" task will fail and abort the
play.

Instead, match against 'peerURLs', which does not need a new variable, and
use the JSON output (a sketch of the approach follows below).

* Add testcase for etcd removal on external etcd

2025-11-10 03:52:56 -08:00
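
A minimal sketch of the peerURLs-based matching described above — not the commit's actual Ansible task — assuming etcdctl v3 and jq are available; TARGET_PEER_URL is a placeholder:

# Hypothetical helper: remove an etcd member identified by its peer URL.
# etcdctl's JSON output reports member IDs in decimal, while "member remove"
# expects the hexadecimal form, hence the printf conversion.
TARGET_PEER_URL="https://10.0.0.5:2380"  # placeholder value
member_id=$(etcdctl member list --write-out=json \
  | jq -r --arg url "$TARGET_PEER_URL" \
      '.members[] | select(.peerURLs[] == $url) | .ID')
etcdctl member remove "$(printf '%x' "$member_id")"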

#!/bin/bash
set -euxo pipefail
if [[ -v TESTCASE ]]; then
  TESTCASE_FILE=files/${TESTCASE}.yml
else
  TESTCASE_FILE=common_vars.yml
  TESTCASE=default
fi
echo "TESTCASE is $TESTCASE"
source tests/files/$TESTCASE || true
# Check out latest tag if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
git fetch --all && git checkout $(git describe --tags --abbrev=0)
# Checkout the current tests/ directory ; even when testing old version,
# we want the up-to-date test setup/provisionning
git checkout "${CI_COMMIT_SHA}" -- tests/
pip install --no-compile --no-cache-dir -r requirements.txt
fi
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root
# Test collection build and install by installing our collection, emptying our repository, adding
# cluster.yml, reset.yml, and remote-node.yml files that simply point to our collection's playbooks, and then
# running the same tests as before
if [[ "${TESTCASE}" =~ "collection" ]]; then
# Build and install collection
ansible-galaxy collection build
ansible-galaxy collection install kubernetes_sigs-kubespray-*.tar.gz
fi
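# run_playbook <name> [extra ansible-playbook args...]: resolve <name> to the
# installed collection playbook or to the local <name>.yml, then run it with
# the common test vars.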
run_playbook () {
  if [[ "${TESTCASE}" =~ "collection" ]]; then
    playbook=kubernetes_sigs.kubespray.$1
  else
    playbook=$1.yml
  fi
  shift
  ansible-playbook \
    -e @tests/common_vars.yml \
    -e @tests/${TESTCASE_FILE} \
    -e local_release_dir=${PWD}/downloads \
    "$@" \
    ${playbook}
}
## START KUBESPRAY
# Create cluster
if [[ "${TESTCASE}" =~ "scale" ]]; then
run_playbook cluster --limit '!for_scale'
run_playbook scale --limit 'for_scale'
else
run_playbook cluster
fi
# Repeat deployment if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
git checkout "${CI_COMMIT_SHA}"
pip install --no-compile --no-cache-dir -r requirements.txt
case "${UPGRADE_TEST}" in
"basic")
run_playbook cluster
;;
"graceful")
run_playbook upgrade_cluster
;;
*)
;;
esac
fi
# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
run_playbook reset --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}" -e reset_confirmation=yes
run_playbook recover-control-plane -e etcd_retries=10 --limit "etcd:kube_control_plane"
fi
# Run tests
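# Invoked directly rather than via run_playbook: the verification playbook
# lives under tests/ in the repo checkout, not in the collection.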
ansible-playbook \
  -e @tests/common_vars.yml \
  -e @tests/${TESTCASE_FILE} \
  -e local_release_dir=${PWD}/downloads \
  tests/testcases/tests.yml
# Test node removal procedure
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
run_playbook remove-node -e skip_confirmation=yes -e node="${REMOVE_NODE_NAME}"
fi
# Clean up at the end; this allows stage1 tests to include a cleanup test
if [ "${RESET_CHECK}" = "true" ]; then
run_playbook reset -e reset_confirmation=yes
fi