Rename master to control plane - non-breaking changes only (#11394)

K8s is moving away from the "master" terminology, so kubespray should follow the same naming conventions. See 65d886bb30/sig-architecture/naming/recommendations/001-master-control-plane.md
This commit is contained in:
Bogdan Sass
2024-09-06 09:56:19 +03:00
committed by GitHub
parent d4bf3b9dc7
commit 4b324cb0f0
37 changed files with 165 additions and 138 deletions

View File

@@ -5,7 +5,7 @@ upgrade_cluster_setup: false
# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
# A port range to reserve for services with NodePort visibility.
@@ -38,7 +38,7 @@ kube_controller_manager_leader_elect_renew_deadline: 10s
# discovery_timeout modifies the discovery timeout
discovery_timeout: 5m0s
# Instruct first master to refresh kubeadm token
# Instruct first control plane node to refresh kubeadm token
kubeadm_refresh_token: true
# Scale down coredns replicas to 0 if not using coredns dns_mode

View File

@@ -1,16 +1,16 @@
---
- name: Master | reload systemd
- name: Control plane | reload systemd
systemd_service:
daemon_reload: true
listen: Master | restart kubelet
listen: Control plane | restart kubelet
- name: Master | reload kubelet
- name: Control plane | reload kubelet
service:
name: kubelet
state: restarted
listen: Master | restart kubelet
listen: Control plane | restart kubelet
- name: Master | Remove apiserver container docker
- name: Control plane | Remove apiserver container docker
shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash
@@ -19,9 +19,9 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart apiserver
listen: Control plane | Restart apiserver
- name: Master | Remove apiserver container containerd/crio
- name: Control plane | Remove apiserver container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@@ -30,9 +30,9 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart apiserver
listen: Control plane | Restart apiserver
- name: Master | Remove scheduler container docker
- name: Control plane | Remove scheduler container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
args:
executable: /bin/bash
@@ -41,9 +41,9 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-scheduler
listen: Control plane | Restart kube-scheduler
- name: Master | Remove scheduler container containerd/crio
- name: Control plane | Remove scheduler container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@@ -52,9 +52,9 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-scheduler
listen: Control plane | Restart kube-scheduler
- name: Master | Remove controller manager container docker
- name: Control plane | Remove controller manager container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
args:
executable: /bin/bash
@@ -63,9 +63,9 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-controller-manager
listen: Control plane | Restart kube-controller-manager
- name: Master | Remove controller manager container containerd/crio
- name: Control plane | Remove controller manager container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@@ -74,9 +74,9 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-controller-manager
listen: Control plane | Restart kube-controller-manager
- name: Master | wait for kube-scheduler
- name: Control plane | wait for kube-scheduler
vars:
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
@@ -87,10 +87,10 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-scheduler
- Control plane | restart kubelet
- Control plane | Restart kube-scheduler
- name: Master | wait for kube-controller-manager
- name: Control plane | wait for kube-controller-manager
vars:
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
@@ -101,10 +101,10 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-controller-manager
- Control plane | restart kubelet
- Control plane | Restart kube-controller-manager
- name: Master | wait for the apiserver to be running
- name: Control plane | wait for the apiserver to be running
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: false
@@ -113,5 +113,5 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart apiserver
- Control plane | restart kubelet
- Control plane | Restart apiserver

View File

@@ -23,7 +23,7 @@
kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
when: secrets_encryption_file.stat.exists
- name: Set kube_encrypt_token across master nodes
- name: Set kube_encrypt_token across control plane nodes
set_fact:
kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
delegate_to: "{{ item }}"

View File

@@ -12,6 +12,6 @@
- kubelet.conf
- scheduler.conf
notify:
- "Master | Restart kube-controller-manager"
- "Master | Restart kube-scheduler"
- "Master | reload kubelet"
- "Control plane | Restart kube-controller-manager"
- "Control plane | Restart kube-scheduler"
- "Control plane | reload kubelet"

View File

@@ -189,7 +189,7 @@
mode: "0644"
when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: Kubeadm | Initialize first master
- name: Kubeadm | Initialize first control plane node
command: >-
timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
{{ bin_dir }}/kubeadm init
@@ -205,7 +205,7 @@
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Set kubeadm certificate key
set_fact:
@@ -250,7 +250,7 @@
tags:
- kubeadm_token
- name: Kubeadm | Join other masters
- name: Kubeadm | Join other control plane nodes
include_tasks: kubeadm-secondary.yml
- name: Kubeadm | upgrade kubernetes cluster
@@ -260,7 +260,7 @@
- kubeadm_already_run.stat.exists
# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: Kubeadm | Remove taint for master with node role
- name: Kubeadm | Remove taint for control plane node with node role
command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
delegate_to: "{{ first_kube_control_plane }}"
with_items:

View File

@@ -9,7 +9,7 @@
delay: 5
until: _result.status == 200
- name: Kubeadm | Upgrade first master
- name: Kubeadm | Upgrade first control plane node
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@@ -28,9 +28,9 @@
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Kubeadm | Upgrade other masters
- name: Kubeadm | Upgrade other control plane nodes
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@@ -49,7 +49,7 @@
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Kubeadm | Remove binding to anonymous user
command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"

View File

@@ -6,7 +6,7 @@
line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: true
notify:
- "Master | reload kubelet"
- "Control plane | reload kubelet"
- name: Fixup kubelet client cert rotation 2/2
lineinfile:
@@ -15,4 +15,4 @@
line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: true
notify:
- "Master | reload kubelet"
- "Control plane | reload kubelet"

View File

@@ -1,5 +1,5 @@
---
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
- name: "Pre-upgrade | Delete control plane manifests if etcd secrets changed"
file:
path: "/etc/kubernetes/manifests/{{ item }}.manifest"
state: absent
@@ -8,14 +8,14 @@
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed | default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
- name: "Pre-upgrade | Delete control plane containers forcefully" # noqa no-handler
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
register: remove_master_container
register: remove_control_plane_container
retries: 10
until: remove_master_container.rc == 0
until: remove_control_plane_container.rc == 0
delay: 1

View File

@@ -71,7 +71,7 @@
owner: "root"
mode: "0644"
when:
- not is_kube_master
- ('kube_control_plane' not in group_names)
- not kubelet_conf.stat.exists
- kubeadm_use_file_discovery
@@ -81,7 +81,7 @@
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: true
mode: "0640"
when: not is_kube_master
when: ('kube_control_plane' not in group_names)
- name: Kubeadm | Create directory to store kubeadm patches
file:
@@ -101,7 +101,9 @@
- name: Join to cluster if needed
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
when: not is_kube_master and (not kubelet_conf.stat.exists)
when:
- ('kube_control_plane' not in group_names)
- not kubelet_conf.stat.exists
block:
- name: Join to cluster
@@ -143,7 +145,7 @@
backup: true
when:
- kubeadm_config_api_fqdn is not defined
- not is_kube_master
- ('kube_control_plane' not in group_names)
- kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
notify: Kubeadm | restart kubelet
@@ -154,7 +156,7 @@
line: ' server: {{ kube_apiserver_endpoint }}'
backup: true
when:
- not is_kube_master
- ('kube_control_plane' not in group_names)
- loadbalancer_apiserver is defined
notify: Kubeadm | restart kubelet
@@ -169,8 +171,8 @@
tags:
- kube-proxy
# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
# incorrectly to first master, creating SPoF.
# FIXME(mattymo): Need to point to localhost, otherwise control plane nodes will all point
# incorrectly to first control plane node, creating SPoF.
- name: Update server field in kube-proxy kubeconfig
shell: >-
set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml

View File

@@ -42,7 +42,7 @@ kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
kube_master_memory_reserved: 512Mi
kube_master_cpu_reserved: 200m
# kube_master_ephemeral_storage_reserved: 2Gi
@@ -56,7 +56,7 @@ system_memory_reserved: 512Mi
system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
system_master_memory_reserved: 256Mi
system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi
@@ -136,7 +136,7 @@ kubelet_config_extra_args_cgroupfs:
systemCgroups: /system.slice
cgroupRoot: /
## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control plane nodes
kubelet_node_config_extra_args: {}
# Maximum number of container log files that can be present for a container.
@@ -148,7 +148,7 @@ kubelet_logfiles_max_size: 10Mi
## Support custom flags to be passed to kubelet
kubelet_custom_flags: []
## Support custom flags to be passed to kubelet only on nodes, not masters
## Support custom flags to be passed to kubelet only on nodes, not control plane nodes
kubelet_node_custom_flags: []
# If non-empty, will use this string as identification instead of the actual hostname
@@ -216,7 +216,7 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default(''
# azure_vmtype: standard
# Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
azure_loadbalancer_sku: basic
# excludes master nodes from standard load balancer.
# excludes control plane nodes from standard load balancer.
azure_exclude_master_from_standard_lb: true
# disables the outbound SNAT for public load balancer rules
azure_disable_outbound_snat: false

View File

@@ -24,7 +24,7 @@
- name: Install kube-vip
import_tasks: loadbalancer/kube-vip.yml
when:
- is_kube_master
- ('kube_control_plane' in group_names)
- kube_vip_enabled
tags:
- kube-vip
@@ -32,7 +32,7 @@
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'nginx'
tags:
@@ -41,7 +41,7 @@
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'haproxy'
tags:

View File

@@ -64,7 +64,7 @@ clusterDNS:
kubeReservedCgroup: {{ kube_reserved_cgroups }}
{% endif %}
kubeReserved:
{% if is_kube_master | bool %}
{% if 'kube_control_plane' in group_names %}
cpu: "{{ kube_master_cpu_reserved }}"
memory: {{ kube_master_memory_reserved }}
{% if kube_master_ephemeral_storage_reserved is defined %}
@@ -86,7 +86,7 @@ kubeReserved:
{% if system_reserved | bool %}
systemReservedCgroup: {{ system_reserved_cgroups }}
systemReserved:
{% if is_kube_master | bool %}
{% if 'kube_control_plane' in group_names %}
cpu: "{{ system_master_cpu_reserved }}"
memory: {{ system_master_memory_reserved }}
{% if system_master_ephemeral_storage_reserved is defined %}
@@ -106,10 +106,10 @@ systemReserved:
{% endif %}
{% endif %}
{% endif %}
{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
{% if ('kube_control_plane' in group_names) and (eviction_hard_control_plane is defined) and eviction_hard_control_plane %}
evictionHard:
{{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
{% elif ('kube_control_plane' not in group_names) and (eviction_hard is defined) and eviction_hard %}
evictionHard:
{{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
{% endif %}

View File

@@ -60,7 +60,7 @@
- not ignore_assert_errors
- inventory_hostname in groups.get('etcd',[])
- name: Stop if memory is too small for masters
- name: Stop if memory is too small for control plane nodes
assert:
that: ansible_memtotal_mb >= minimal_master_memory_mb
when:

View File

@@ -15,7 +15,8 @@
- bootstrap-os
- apps
- network
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- node
with_items:
- "{{ kube_config_dir }}"
@@ -39,7 +40,8 @@
- bootstrap-os
- apps
- network
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- node
with_items:
- "{{ kube_cert_dir }}"

View File

@@ -1,12 +1,12 @@
---
- name: "Check_tokens | check if the tokens have already been generated on first master"
- name: "Check_tokens | check if the tokens have already been generated on first control plane node"
stat:
path: "{{ kube_token_dir }}/known_tokens.csv"
get_attributes: false
get_checksum: true
get_mime: false
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: known_tokens_master
register: known_tokens_control_plane
run_once: true
- name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false"
@@ -17,7 +17,7 @@
- name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true"
set_fact:
gen_tokens: true
when: not known_tokens_master.stat.exists and kube_token_auth | default(true)
when: not known_tokens_control_plane.stat.exists and kube_token_auth | default(true)
run_once: true
- name: "Check tokens | check if a cert already exists"
@@ -34,7 +34,7 @@
{%- set tokens = {'sync': False} -%}
{%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
if (not hostvars[server].known_tokens.stat.exists) or
(hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%}
(hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_control_plane.stat.checksum | default('')) -%}
{%- set _ = tokens.update({'sync': True}) -%}
{%- endfor -%}
{{ tokens.sync }}

View File

@@ -8,15 +8,15 @@
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
- name: Gen_tokens | generate tokens for master components
- name: Gen_tokens | generate tokens for control plane components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:kubectl" ]
- "{{ groups['kube_control_plane'] }}"
register: gentoken_master
changed_when: "'Added' in gentoken_master.stdout"
register: gentoken_control_plane
changed_when: "'Added' in gentoken_control_plane.stdout"
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
@@ -34,7 +34,7 @@
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
- name: Gen_tokens | Get list of tokens from first master
- name: Gen_tokens | Get list of tokens from first control plane node
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
register: tokens_list
check_mode: false
@@ -52,7 +52,7 @@
run_once: true
when: sync_tokens | default(false)
- name: Gen_tokens | Copy tokens on masters
- name: Gen_tokens | Copy tokens on control plane nodes
shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
args:
executable: /bin/bash