Resolve ansible-lint name errors (#10253)

* project: fix ansible-lint name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: ignore jinja template error in names

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: capitalize ansible name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: update notify after name capitalization

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
This commit is contained in:
Arthur Outhenin-Chalandre
2023-07-26 16:36:22 +02:00
committed by GitHub
parent b9e3861385
commit 36e5d742dc
162 changed files with 842 additions and 675 deletions

View File

@@ -1,6 +1,7 @@
---
- import_tasks: credentials-check.yml
- name: OCI Cloud Controller | Check Oracle Cloud credentials
import_tasks: credentials-check.yml
- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
template:

View File

@@ -59,7 +59,8 @@
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- include_tasks: oci.yml
- name: Configure Oracle Cloud provider
include_tasks: oci.yml
tags: oci
when:
- cloud_provider is defined

View File

@@ -1,6 +1,6 @@
---
- name: crun | Copy runtime class manifest
- name: Crun | Copy runtime class manifest
template:
src: runtimeclass-crun.yml
dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
@@ -8,7 +8,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: crun | Apply manifests
- name: Crun | Apply manifests
kube:
name: "runtimeclass-crun"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,5 +1,5 @@
---
- name: gVisor | Create addon dir
- name: GVisor | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/gvisor"
owner: root
@@ -7,12 +7,12 @@
mode: 0755
recurse: true
- name: gVisor | Templates List
- name: GVisor | Templates List
set_fact:
gvisor_templates:
- { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass }
- name: gVisort | Create manifests
- name: GVisor | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}"
@@ -22,7 +22,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: gVisor | Apply manifests
- name: GVisor | Apply manifests
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,6 +1,6 @@
---
- name: youki | Copy runtime class manifest
- name: Youki | Copy runtime class manifest
template:
src: runtimeclass-youki.yml
dest: "{{ kube_config_dir }}/runtimeclass-youki.yml"
@@ -8,7 +8,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: youki | Apply manifests
- name: Youki | Apply manifests
kube:
name: "runtimeclass-youki"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,5 +1,6 @@
---
- include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Check Azure credentials
include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Write Azure CSI cloud-config
template:

View File

@@ -1,5 +1,6 @@
---
- include_tasks: cinder-credential-check.yml
- name: Cinder CSI Driver | Check Cinder credentials
include_tasks: cinder-credential-check.yml
- name: Cinder CSI Driver | Write cacert file
include_tasks: cinder-write-cacert.yml

View File

@@ -1,7 +1,8 @@
---
- include_tasks: vsphere-credentials-check.yml
- name: VSphere CSI Driver | Check vsphere credentials
include_tasks: vsphere-credentials-check.yml
- name: vSphere CSI Driver | Generate CSI cloud-config
- name: VSphere CSI Driver | Generate CSI cloud-config
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
@@ -10,7 +11,7 @@
- vsphere-csi-cloud-config
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Generate Manifests
- name: VSphere CSI Driver | Generate Manifests
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
@@ -27,7 +28,7 @@
register: vsphere_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Apply Manifests
- name: VSphere CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item }}"
@@ -40,13 +41,13 @@
loop_control:
label: "{{ item.item }}"
- name: vSphere CSI Driver | Generate a CSI secret manifest
- name: VSphere CSI Driver | Generate a CSI secret manifest
command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs | bool) }}"
- name: vSphere CSI Driver | Apply a CSI secret manifest
- name: VSphere CSI Driver | Apply a CSI secret manifest
command:
cmd: "{{ kubectl }} apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"

View File

@@ -1,5 +1,6 @@
---
- include_tasks: openstack-credential-check.yml
- name: External OpenStack Cloud Controller | Check OpenStack credentials
include_tasks: openstack-credential-check.yml
tags: external-openstack
- name: External OpenStack Cloud Controller | Get base64 cacert

View File

@@ -1,5 +1,6 @@
---
- include_tasks: vsphere-credentials-check.yml
- name: External vSphere Cloud Controller | Check vsphere credentials
include_tasks: vsphere-credentials-check.yml
- name: External vSphere Cloud Controller | Generate CPI cloud-config
template:

View File

@@ -1,6 +1,6 @@
---
- name: kube-router | Start Resources
- name: Kube-router | Start Resources
kube:
name: "kube-router"
kubectl: "{{ bin_dir }}/kubectl"
@@ -11,7 +11,7 @@
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
- name: Kube-router | Wait for kube-router pods to be ready
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1

View File

@@ -1,5 +1,5 @@
---
- name: check if snapshot namespace exists
- name: Check if snapshot namespace exists
register: snapshot_namespace_exists
kube:
kubectl: "{{ bin_dir }}/kubectl"