From c3d4864e63665d3e0f653f74e8aaf6ff14720c87 Mon Sep 17 00:00:00 2001
From: Yang-Ming Lin <11317013+yangminglintw@users.noreply.github.com>
Date: Tue, 28 Apr 2026 10:24:48 +0800
Subject: [PATCH] Refactor(defaults): centralize etcd defaults (#13161)

---
 roles/etcd/tasks/check_certs.yml      |  6 ++++--
 roles/etcd/tasks/gen_certs_script.yml | 14 +++++++-------
 roles/etcd/tasks/main.yml             |  4 ++--
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index a4829b922..39e5ac574 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -13,6 +13,8 @@
     sync_certs: false
     gen_certs: false
     etcd_secret_changed: false
+    etcd_member_requires_sync: false
+    kubernetes_host_requires_sync: false
 
 - name: "Check certs | Register ca and etcd admin/member certs on etcd hosts"
   stat:
@@ -128,7 +130,7 @@
   set_fact:
     sync_certs: true
   when:
-    - etcd_member_requires_sync | default(false) or
-      kubernetes_host_requires_sync | default(false) or
+    - etcd_member_requires_sync or
+      kubernetes_host_requires_sync or
       'gen_master_certs_True' in group_names or
       'gen_node_certs_True' in group_names
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 364b30c4d..ab820fdfd 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -24,7 +24,7 @@
   run_once: true
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
-    - gen_certs | default(false)
+    - gen_certs
     - inventory_hostname == groups['etcd'][0]
 
 - name: Gen_certs | copy certs generation script
@@ -43,7 +43,7 @@
     HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['kube_control_plane']) | join(' ') }}"
   run_once: true
   delegate_to: "{{ groups['etcd'][0] }}"
-  when: gen_certs | default(false)
+  when: gen_certs
   notify: Set etcd_secret_changed
 
 - name: Gen_certs | run cert generation script for all clients
@@ -55,7 +55,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - gen_certs | default(false)
+    - gen_certs
   notify: Set etcd_secret_changed
 
 - name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
@@ -78,7 +78,7 @@
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - ('etcd' in group_names)
-    - sync_certs | default(false)
+    - sync_certs
     - inventory_hostname != groups['etcd'][0]
   notify: Set etcd_secret_changed
 
@@ -92,7 +92,7 @@
   with_items: "{{ etcd_master_certs.results }}"
   when:
     - ('etcd' in group_names)
-    - sync_certs | default(false)
+    - sync_certs
    - inventory_hostname != groups['etcd'][0]
   loop_control:
     label: "{{ item.item }}"
@@ -134,7 +134,7 @@
   include_tasks: gen_nodes_certs_script.yml
   when:
     - ('kube_control_plane' in group_names) and
-      sync_certs | default(false) and inventory_hostname not in groups['etcd']
+      sync_certs and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | Generate etcd certs on nodes if needed
   include_tasks: gen_nodes_certs_script.yml
@@ -142,7 +142,7 @@
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - ('k8s_cluster' in group_names) and
-      sync_certs | default(false) and inventory_hostname not in groups['etcd']
+      sync_certs and inventory_hostname not in groups['etcd']
 
 # This is a hack around the fact kubeadm expect the same certs path on all kube_control_plane
 # TODO: fix certs generation to have the same file everywhere
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 5f77892b1..14e51331f 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -84,7 +84,7 @@
   when:
     - ('etcd' in group_names)
     - etcd_cluster_setup
-    - etcd_secret_changed | default(false)
+    - etcd_secret_changed
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
@@ -92,7 +92,7 @@
   when:
     - ('etcd' in group_names)
     - etcd_events_cluster_setup
-    - etcd_secret_changed | default(false)
+    - etcd_secret_changed
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`