CI: Use the debug stdout callback instead of manual debug

This displays the result of most tasks in a human-readable way, and
should be much more readable than what we have now, which is frequently
a bunch of unreadable JSON.

+ some small fixes (using `delegate_to` instead of `when:
  <control_plane_host>`)
This commit is contained in:
Max Gautier
2024-12-12 16:46:51 +01:00
parent 12ed1fcf93
commit f6d1c294d4
6 changed files with 71 additions and 148 deletions

View File

@@ -29,9 +29,6 @@
register: csr_json
changed_when: false
- debug: # noqa name[missing]
var: csrs
- name: Check there are csrs
assert:
that: csrs | length > 0
@@ -67,17 +64,13 @@
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout
- debug: # noqa name[missing]
msg: "{{ certificate_approve.stdout.split('\n') }}"
- name: Create test namespace
command: "{{ bin_dir }}/kubectl create namespace test"
changed_when: false
- name: Run 2 agnhost pods in test ns
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
cmd: "{{ bin_dir }}/kubectl apply --namespace test -f -"
stdin: |
apiVersion: apps/v1
kind: Deployment
@@ -107,52 +100,35 @@
type: RuntimeDefault
changed_when: false
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check that all pods are running and ready
vars:
pods: "{{ (pods_json | from_json)['items'] }}"
block:
- name: Check Deployment is ready
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180"
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180s"
changed_when: false
rescue:
- name: Get pod names
command: "{{ bin_dir }}/kubectl get pods -n test -o json"
changed_when: false
register: pods
register: pods_json
- name: Get running pods
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
changed_when: false
register: running_pods
- name: Check pods IP are in correct network
assert:
that: pods
| selectattr('status.phase', '==', 'Running')
| selectattr('status.podIP', 'ansible.utils.in_network', kube_pods_subnet)
| length == 2
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
register: get_pods
- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP }}:8080"
with_nested:
- "{{ pods }}"
- "{{ pods }}"
rescue:
- name: List pods cluster-wide
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Set networking facts
set_fact:
kube_pods_subnet: 10.233.64.0/18
pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"
pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute='status.podIP') | list }}"
pods_running: |
{% set list = running_pods.stdout.split(" ") %}
{{ list }}
- name: Check pods IP are in correct network
assert:
that: item | ansible.utils.ipaddr(kube_pods_subnet)
when:
- item in pods_running
with_items: "{{ pod_ips }}"
- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- curl {{ item[1] }}:8080"
with_nested:
- "{{ pod_names }}"
- "{{ pod_ips }}"
- import_role: # noqa name[missing]
name: cluster-dump
- fail: # noqa name[missing]