Merge pull request #12302 from VannTen/ci/factorize_molecule_scenario

CI: cleanup and factorization of molecule tests
This commit is contained in:
Kubernetes Prow Robot
2025-06-17 10:23:00 -07:00
committed by GitHub
34 changed files with 270 additions and 552 deletions

View File

@@ -24,6 +24,7 @@ variables:
  ANSIBLE_REMOTE_USER: kubespray
  ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
  ANSIBLE_INVENTORY: /tmp/inventory
+  ANSIBLE_STDOUT_CALLBACK: "debug"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
@@ -48,8 +49,6 @@ before_script:
    - cluster-dump/
  needs:
    - pipeline-image
-  variables:
-    ANSIBLE_STDOUT_CALLBACK: "debug"

.job-moderated:
  extends: .job

View File

@@ -6,6 +6,7 @@ pre-commit:
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
  variables:
    PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
+    ANSIBLE_STDOUT_CALLBACK: default
  script:
    - pre-commit run --all-files --show-diff-on-failure
  cache:

View File

@@ -34,6 +34,8 @@ molecule:
        - container-engine/cri-dockerd
        - container-engine/containerd
        - container-engine/cri-o
+        - container-engine/gvisor
+        - container-engine/youki
        - adduser
        - bastion-ssh-config
        - bootstrap_os
@@ -51,5 +53,3 @@ molecule_full:
    - ROLE:
        # FIXME : tests below are perma-failing
        - container-engine/kata-containers
-        - container-engine/gvisor
-        - container-engine/youki

View File

@@ -6,35 +6,12 @@
# - to ensure we keep compatibility with old style group names
# - to reduce inventory boilerplate (defining parent groups / empty groups)
-- name: Define groups for legacy less structured inventories
-  hosts: all
-  gather_facts: false
-  tags: always
-  tasks:
-    - name: Match needed groups by their old names or definition
-      vars:
-        group_mappings:
-          kube_control_plane:
-            - kube-master
-          kube_node:
-            - kube-node
-          calico_rr:
-            - calico-rr
-          no_floating:
-            - no-floating
-          k8s_cluster:
-            - kube_node
-            - kube_control_plane
-            - calico_rr
-      group_by:
-        key: "{{ (group_names | intersect(item.value) | length > 0) | ternary(item.key, '_all') }}"
-      loop: "{{ group_mappings | dict2items }}"
-
-- name: Check inventory settings
+- name: Inventory setup and validation
  hosts: all
  gather_facts: false
  tags: always
  roles:
+    - dynamic_groups
    - validate_inventory

- name: Install bastion ssh config

View File

@@ -35,5 +35,6 @@ provisioner:
      timeout: 120
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,30 +0,0 @@
---
- name: Prepare
hosts: all
gather_facts: false
become: true
vars:
ignore_assert_errors: true
roles:
- role: kubespray_defaults
- role: bootstrap_os
- role: network_facts
- role: kubernetes/preinstall
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare CNI
hosts: all
gather_facts: false
become: true
vars:
ignore_assert_errors: true
kube_network_plugin: cni
roles:
- role: kubespray_defaults
- role: network_plugin/cni

View File

@@ -1,55 +0,0 @@
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_service(host):
svc = host.service("containerd")
assert svc.is_running
assert svc.is_enabled
def test_version(host):
crictl = "/usr/local/bin/crictl"
path = "unix:///var/run/containerd/containerd.sock"
with host.sudo():
cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
assert cmd.rc == 0
assert "RuntimeName: containerd" in cmd.stdout
@pytest.mark.parametrize('image, dest', [
('quay.io/kubespray/hello-world:latest', '/tmp/hello-world.tar')
])
def test_image_pull_save_load(host, image, dest):
nerdctl = "/usr/local/bin/nerdctl"
dest_file = host.file(dest)
with host.sudo():
pull_cmd = host.command(nerdctl + " pull " + image)
assert pull_cmd.rc ==0
with host.sudo():
save_cmd = host.command(nerdctl + " save -o " + dest + " " + image)
assert save_cmd.rc == 0
assert dest_file.exists
with host.sudo():
load_cmd = host.command(nerdctl + " load < " + dest)
assert load_cmd.rc == 0
@pytest.mark.parametrize('image', [
('quay.io/kubespray/hello-world:latest')
])
def test_run(host, image):
nerdctl = "/usr/local/bin/nerdctl"
with host.sudo():
cmd = host.command(nerdctl + " -n k8s.io run " + image)
assert cmd.rc == 0
assert "Hello from Docker" in cmd.stdout

View File

@@ -0,0 +1,39 @@
---
- name: Test containerd CRI
import_playbook: ../../../molecule/test_cri.yml
vars:
container_manager: containerd
cri_socket: unix:///var/run/containerd/containerd.sock
cri_name: containerd
- name: Test nerdctl
hosts: all
gather_facts: false
become: true
tasks:
- name: Get kubespray defaults
import_role:
name: ../../../../../kubespray_defaults
- name: Test nerdctl commands
command: "{{ bin_dir }}/nerdctl {{ item | join(' ') }}"
vars:
image: quay.io/kubespray/hello-world:latest
loop:
- - pull
- "{{ image }}"
- - save
- -o
- /tmp/hello-world.tar
- "{{ image }}"
- - load
- -i
- /tmp/hello-world.tar
- - -n
- k8s.io
- run
- "{{ image }}"
register: nerdctl
- name: Check log from running a container
assert:
that:
- ('Hello from Docker' in nerdctl.results[3].stdout)

View File

@@ -27,5 +27,6 @@ provisioner:
        become: true
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,48 +0,0 @@
---
- name: Prepare
hosts: all
become: true
roles:
- role: kubespray_defaults
- role: bootstrap_os
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare container runtime
hosts: all
become: true
vars:
container_manager: containerd
kube_network_plugin: cni
roles:
- role: kubespray_defaults
- role: network_plugin/cni
tasks:
- name: Copy test container files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
owner: root
mode: "0644"
with_items:
- container.json
- sandbox.json
- name: Create /etc/cni/net.d directory
file:
path: /etc/cni/net.d
state: directory
owner: "{{ kube_owner }}"
mode: "0755"
- name: Setup CNI
copy:
src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}"
owner: root
mode: "0644"
with_items:
- 10-mynet.conf

View File

@@ -1,19 +0,0 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_run_pod(host):
run_command = "/usr/local/bin/crictl run --with-pull /tmp/container.json /tmp/sandbox.json"
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/cri-dockerd1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View File

@@ -0,0 +1,15 @@
---
- name: Test cri-dockerd
import_playbook: ../../../molecule/test_cri.yml
vars:
container_manager: cri-dockerd
cri_socket: unix:///var/run/cri-dockerd.sock
cri_name: docker
- name: Test running a container with docker
import_playbook: ../../../molecule/test_runtime.yml
vars:
container_runtime: docker
# cri-dockerd does not support multiple runtime handler before 0.4.0
# https://github.com/Mirantis/cri-dockerd/pull/350
# TODO: check this when we upgrade cri-dockerd

View File

@@ -43,5 +43,6 @@ provisioner:
      timeout: 120
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,35 +0,0 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_service(host):
svc = host.service("crio")
assert svc.is_running
assert svc.is_enabled
def test_run(host):
crictl = "/usr/local/bin/crictl"
path = "unix:///var/run/crio/crio.sock"
with host.sudo():
cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
assert cmd.rc == 0
assert "RuntimeName: cri-o" in cmd.stdout
def test_run_pod(host):
runtime = "crun"
run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/runc1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View File

@@ -0,0 +1,11 @@
---
- name: Test CRI-O cri
import_playbook: ../../../molecule/test_cri.yml
vars:
container_manager: crio
cri_socket: unix:///var/run/crio/crio.sock
cri_name: cri-o
- name: Test running a container with crun
import_playbook: ../../../molecule/test_runtime.yml
vars:
container_runtime: crun

View File

@@ -1,28 +1,18 @@
---
role_name_check: 1
-driver:
-  name: vagrant
-  provider:
-    name: libvirt
platforms:
-  - name: ubuntu20
-    box: generic/ubuntu2004
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+  - cloud_image: ubuntu-2004
+    name: ubuntu20
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
  - name: almalinux9
-    box: almalinux/9
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+    cloud_image: almalinux-9
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
provisioner:
  name: ansible
  env:
@@ -31,9 +21,8 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
-  inventory:
-    group_vars:
-      all:
-        become: true
+  playbooks:
+    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,49 +0,0 @@
---
- name: Prepare generic
hosts: all
become: true
roles:
- role: kubespray_defaults
- role: bootstrap_os
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare container runtime
hosts: all
become: true
vars:
container_manager: containerd
kube_network_plugin: cni
roles:
- role: kubespray_defaults
- role: network_plugin/cni
- role: container-engine/crictl
tasks:
- name: Copy test container files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
owner: root
mode: "0644"
with_items:
- container.json
- sandbox.json
- name: Create /etc/cni/net.d directory
file:
path: /etc/cni/net.d
state: directory
owner: root
mode: "0755"
- name: Setup CNI
copy:
src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}"
owner: root
mode: "0644"
with_items:
- 10-mynet.conf

View File

@@ -1,29 +0,0 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_run(host):
gvisorruntime = "/usr/local/bin/runsc"
with host.sudo():
cmd = host.command(gvisorruntime + " --version")
assert cmd.rc == 0
assert "runsc version" in cmd.stdout
def test_run_pod(host):
runtime = "runsc"
run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/gvisor1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View File

@@ -0,0 +1,19 @@
---
- name: Test gvisor
hosts: all
gather_facts: false
tasks:
- name: Get kubespray defaults
import_role:
name: ../../../../../kubespray_defaults
- name: Test version
command: "{{ bin_dir }}/runsc --version"
register: runsc_version
failed_when: >
runsc_version is failed or
'runsc version' not in runsc_version.stdout
- name: Test run container
import_playbook: ../../../molecule/test_runtime.yml
vars:
container_runtime: runsc

View File

@@ -1,28 +1,18 @@
---
role_name_check: 1
-driver:
-  name: vagrant
-  provider:
-    name: libvirt
platforms:
  - name: ubuntu20
-    box: generic/ubuntu2004
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+    cloud_image: ubuntu-2004
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
  - name: ubuntu22
-    box: generic/ubuntu2204
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+    cloud_image: ubuntu-2204
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
provisioner:
  name: ansible
  env:
@@ -31,9 +21,8 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
-  inventory:
-    group_vars:
-      all:
-        become: true
+  playbooks:
+    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,49 +0,0 @@
---
- name: Prepare
hosts: all
become: true
roles:
- role: kubespray_defaults
- role: bootstrap_os
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare container runtime
hosts: all
become: true
vars:
container_manager: containerd
kube_network_plugin: cni
roles:
- role: kubespray_defaults
- role: network_plugin/cni
- role: container-engine/crictl
tasks:
- name: Copy test container files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
owner: root
mode: "0644"
with_items:
- container.json
- sandbox.json
- name: Create /etc/cni/net.d directory
file:
path: /etc/cni/net.d
state: directory
owner: "{{ kube_owner }}"
mode: "0755"
- name: Setup CNI
copy:
src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}"
owner: root
mode: "0644"
with_items:
- 10-mynet.conf

View File

@@ -1,37 +0,0 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_run(host):
kataruntime = "/opt/kata/bin/kata-runtime"
with host.sudo():
cmd = host.command(kataruntime + " version")
assert cmd.rc == 0
assert "kata-runtime" in cmd.stdout
def test_run_check(host):
kataruntime = "/opt/kata/bin/kata-runtime"
with host.sudo():
cmd = host.command(kataruntime + " check")
assert cmd.rc == 0
assert "System is capable of running" in cmd.stdout
def test_run_pod(host):
runtime = "kata-qemu"
run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/kata1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View File

@@ -0,0 +1,23 @@
---
- name: Test kata-containers
hosts: all
gather_facts: false
tasks:
- name: Test version
command: "/opt/kata/bin/kata-runtime version"
register: version
failed_when: >
version is failed or
'kata-runtime' not in version.stdout
- name: Test version
command: "/opt/kata/bin/kata-runtime check"
register: check
failed_when: >
check is failed or
'System is capable of running' not in check.stdout
- name: Test run container
import_playbook: ../../../molecule/test_runtime.yml
vars:
container_runtime: kata-qemu
container_manager: containerd

View File

@@ -6,7 +6,7 @@
  vars:
    ignore_assert_errors: true
  roles:
-    - role: kubespray_defaults
+    - role: dynamic_groups
    - role: bootstrap_os
    - role: network_facts
    - role: kubernetes/preinstall
@@ -14,7 +14,7 @@
      user: "{{ addusers.kube }}"
  tasks:
    - name: Download CNI
-      include_tasks: "../../../../download/tasks/download_file.yml"
+      include_tasks: "../../download/tasks/download_file.yml"
      vars:
        download: "{{ download_defaults | combine(downloads.cni) }}"
@@ -29,26 +29,15 @@
    - role: kubespray_defaults
    - role: network_plugin/cni
  tasks:
-    - name: Copy test container files
-      copy:
-        src: "{{ item }}"
-        dest: "/tmp/{{ item }}"
-        owner: root
-        mode: "0644"
-      with_items:
-        - container.json
-        - sandbox.json
    - name: Create /etc/cni/net.d directory
      file:
        path: /etc/cni/net.d
        state: directory
-        owner: "{{ kube_owner }}"
+        owner: root
        mode: "0755"
-    - name: Setup CNI
+    - name: Config bridge host-local CNI
      copy:
-        src: "{{ item }}"
-        dest: "/etc/cni/net.d/{{ item }}"
+        src: "10-mynet.conf"
+        dest: "/etc/cni/net.d/"
        owner: root
        mode: "0644"
-      with_items:
-        - 10-mynet.conf

View File

@@ -1,10 +1,10 @@
{
  "metadata": {
-    "name": "runc1"
+    "name": "{{ container_runtime }}1"
  },
  "image": {
    "image": "quay.io/kubespray/hello-world:latest"
  },
-  "log_path": "runc1.0.log",
+  "log_path": "{{ container_runtime }}1.0.log",
  "linux": {}
}

View File

@@ -1,6 +1,6 @@
{
  "metadata": {
-    "name": "runc1",
+    "name": "{{ container_runtime }}1",
    "namespace": "default",
    "attempt": 1,
    "uid": "hdishd83djaidwnduwk28bcsb"

View File

@@ -0,0 +1,24 @@
---
- name: Test container manager
hosts: all
gather_facts: false
become: true
tasks:
- name: Get kubespray defaults
import_role:
name: ../../kubespray_defaults
- name: Collect services facts
ansible.builtin.service_facts:
- name: Check container manager service is running
assert:
that:
- ansible_facts.services[container_manager + '.service'].state == 'running'
- ansible_facts.services[container_manager + '.service'].status == 'enabled'
- name: Check runtime version
command: "{{ bin_dir }}/crictl --runtime-endpoint {{ cri_socket }} version"
register: cri_version
failed_when: >
cri_version is failed or
("RuntimeName: " + cri_name) not in cri_version.stdout

View File

@@ -0,0 +1,42 @@
---
- name: Test container runtime
hosts: all
gather_facts: false
become: true
roles:
- role: ../../kubespray_defaults
tasks:
- name: Copy test container files
template:
src: "{{ item }}.j2"
dest: "/tmp/{{ item }}"
owner: root
mode: "0644"
loop:
- container.json
- sandbox.json
- name: Check running a container with runtime {{ container_runtime }}
block:
- name: Run container
command:
argv:
- "{{ bin_dir }}/crictl"
- run
- --with-pull
- --runtime
- "{{ container_runtime }}"
- /tmp/container.json
- /tmp/sandbox.json
- name: Check log file
slurp:
src: "/tmp/{{ container_runtime }}1.0.log"
register: log_file
failed_when: >
log_file is failed or
'Hello from Docker' not in (log_file.content | b64decode)
rescue:
- name: Display container manager config on error
command: "{{ bin_dir }}/crictl info"
- name: Check container manager logs
command: journalctl -u {{ container_manager }}
failed_when: true

View File

@@ -1,28 +1,18 @@
---
role_name_check: 1
-driver:
-  name: vagrant
-  provider:
-    name: libvirt
platforms:
-  - name: ubuntu20
-    box: generic/ubuntu2004
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+  - cloud_image: ubuntu-2004
+    name: ubuntu20
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
  - name: almalinux9
-    box: almalinux/9
-    cpus: 1
-    memory: 1024
-    nested: true
-    groups:
+    cloud_image: almalinux-9
+    vm_cpu_cores: 1
+    vm_memory: 1024
+    node_groups:
      - kube_control_plane
-    provider_options:
-      driver: kvm
provisioner:
  name: ansible
  env:
@@ -31,9 +21,8 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
-  inventory:
-    group_vars:
-      all:
-        become: true
+  playbooks:
+    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
+    prepare: ../../../molecule/prepare.yml
verifier:
-  name: testinfra
+  name: ansible

View File

@@ -1,49 +0,0 @@
---
- name: Prepare generic
hosts: all
become: true
roles:
- role: kubespray_defaults
- role: bootstrap_os
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"
- name: Prepare container runtime
hosts: all
become: true
vars:
container_manager: crio
kube_network_plugin: cni
roles:
- role: kubespray_defaults
- role: network_plugin/cni
- role: container-engine/crictl
tasks:
- name: Copy test container files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
owner: root
mode: "0644"
with_items:
- container.json
- sandbox.json
- name: Create /etc/cni/net.d directory
file:
path: /etc/cni/net.d
state: directory
owner: root
mode: "0755"
- name: Setup CNI
copy:
src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}"
owner: root
mode: "0644"
with_items:
- 10-mynet.conf

View File

@@ -1,29 +0,0 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_run(host):
youkiruntime = "/usr/local/bin/youki"
with host.sudo():
cmd = host.command(youkiruntime + " --version")
assert cmd.rc == 0
assert "youki" in cmd.stdout
def test_run_pod(host):
runtime = "youki"
run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
with host.sudo():
cmd = host.command(run_command)
assert cmd.rc == 0
with host.sudo():
log_f = host.file("/tmp/youki1.0.log")
assert log_f.exists
assert b"Hello from Docker" in log_f.content

View File

@@ -0,0 +1,19 @@
---
- name: Test youki
hosts: all
gather_facts: false
tasks:
- name: Get kubespray defaults
import_role:
name: ../../../../../kubespray_defaults
- name: Test version
command: "{{ bin_dir }}/youki --version"
register: youki_version
failed_when: >
youki_version is failed or
'youki' not in youki_version.stdout
- name: Test run container
import_playbook: ../../../molecule/test_runtime.yml
vars:
container_runtime: youki

View File

@@ -0,0 +1,19 @@
---
- name: Match needed groups by their old names or definition
vars:
group_mappings:
kube_control_plane:
- kube-master
kube_node:
- kube-node
calico_rr:
- calico-rr
no_floating:
- no-floating
k8s_cluster:
- kube_node
- kube_control_plane
- calico_rr
group_by:
key: "{{ (group_names | intersect(item.value) | length > 0) | ternary(item.key, '_all') }}"
loop: "{{ group_mappings | dict2items }}"
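
For illustration, a minimal sketch of what the mapping above does at runtime; the inventory below is hypothetical and not part of this commit. Hosts that only appear under old-style group names are regrouped under the new names, so node1 lands in kube_control_plane and node2 in kube_node; the trailing k8s_cluster entry should then pick both up, assuming group_names reflects the groups added by the earlier loop iterations.

# hypothetical-legacy-inventory.yml -- example input only, not in the repository
all:
  children:
    kube-master:        # old-style name, matched into kube_control_plane
      hosts:
        node1:
    kube-node:          # old-style name, matched into kube_node
      hosts:
        node2: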