Mirror of https://github.com/kubernetes-sigs/kubespray.git
Replace kube-master with kube_control_plane (#7256)
This replaces kube-master with kube_control_plane because of [1]:
The Kubernetes project is moving away from wording that is
considered offensive. A new working group WG Naming was created
to track this work, and the word "master" was declared as offensive.
A proposal was formalized for replacing the word "master" with
"control plane". This means it should be removed from source code,
documentation, and user-facing configuration in Kubernetes and
its sub-projects.
NOTE: The new name is kube_control_plane rather than kube-control-plane
because underscores keep the group name valid in Ansible.
[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
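For context on the underscore choice: newer Ansible releases warn about inventory group names that are not valid Python identifiers (for example, names containing dashes), so kube_control_plane avoids that warning. A minimal inventory sketch using the new group name could look like the following; the host names node1-node3 are placeholders, not part of this commit:

[kube_control_plane]
node1
node2

[kube-node]
node3

[etcd]
node1

[k8s-cluster:children]
kube_control_plane
kube-node

Note that only kube-master is renamed here; other dashed group names such as kube-node and k8s-cluster are left untouched by this commit.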
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
  hosts['_meta'] = { 'hostvars': {} }

  ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
- for group in ["kube-master", "kube-node", "etcd"]:
+ for group in ["kube_control_plane", "kube-node", "etcd"]:
  hosts[group] = []
  tag_key = "kubespray-role"
  tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
  hosts[group].append(dns_name)
  hosts['_meta']['hostvars'][dns_name] = ansible_host

- hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+ hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
  print(json.dumps(hosts, sort_keys=True, indent=2))

  SearchEC2Tags()
@@ -7,9 +7,9 @@
  {% endif %}
  {% endfor %}

- [kube-master]
+ [kube_control_plane]
  {% for vm in vm_list %}
- {% if 'kube-master' in vm.tags.roles %}
+ {% if 'kube_control_plane' in vm.tags.roles %}
  {{ vm.name }}
  {% endif %}
  {% endfor %}
@@ -30,4 +30,4 @@

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
@@ -7,9 +7,9 @@
  {% endif %}
  {% endfor %}

- [kube-master]
+ [kube_control_plane]
  {% for vm in vm_roles_list %}
- {% if 'kube-master' in vm.tags.roles %}
+ {% if 'kube_control_plane' in vm.tags.roles %}
  {{ vm.name }}
  {% endif %}
  {% endfor %}
@@ -30,5 +30,5 @@

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
@@ -144,7 +144,7 @@
  "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
  ],
  "tags": {
- "roles": "kube-master,etcd"
+ "roles": "kube_control_plane,etcd"
  },
  "apiVersion": "{{apiVersion}}",
  "properties": {
@@ -44,7 +44,7 @@ import re
  import subprocess
  import sys

- ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+ ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
  'calico-rr']
  PROTECTED_NAMES = ROLES
  AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
@@ -299,21 +299,23 @@ class KubesprayInventory(object):

  def set_kube_control_plane(self, hosts):
  for host in hosts:
- self.add_host_to_group('kube-master', host)
+ self.add_host_to_group('kube_control_plane', host)

  def set_all(self, hosts):
  for host, opts in hosts.items():
  self.add_host_to_group('all', host, opts)

  def set_k8s_cluster(self):
- k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+ k8s_cluster = {'children': {'kube_control_plane': None,
+ 'kube-node': None}}
  self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster

  def set_calico_rr(self, hosts):
  for host in hosts:
- if host in self.yaml_config['all']['children']['kube-master']:
+ if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
  self.debug("Not adding {0} to calico-rr group because it "
- "conflicts with kube-master group".format(host))
+ "conflicts with kube_control_plane "
+ "group".format(host))
  continue
  if host in self.yaml_config['all']['children']['kube-node']:
  self.debug("Not adding {0} to calico-rr group because it "
@@ -330,10 +332,10 @@ class KubesprayInventory(object):
  "group.".format(host))
  continue
  if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
- if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
+ if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
  self.debug("Not adding {0} to kube-node group because of "
- "scale deployment and host is in kube-master "
- "group.".format(host))
+ "scale deployment and host is in "
+ "kube_control_plane group.".format(host))
  continue
  self.add_host_to_group('kube-node', host)
@@ -223,7 +223,7 @@ class TestInventory(unittest.TestCase):
  None)

  def test_set_kube_control_plane(self):
- group = 'kube-master'
+ group = 'kube_control_plane'
  host = 'node1'

  self.inv.set_kube_control_plane([host])
@@ -242,7 +242,7 @@ class TestInventory(unittest.TestCase):

  def test_set_k8s_cluster(self):
  group = 'k8s-cluster'
- expected_hosts = ['kube-node', 'kube-master']
+ expected_hosts = ['kube-node', 'kube_control_plane']

  self.inv.set_k8s_cluster()
  for host in expected_hosts:
@@ -19,6 +19,6 @@
  roles:
  - { role: glusterfs/client }

- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  roles:
  - { role: kubernetes-pv }
@@ -14,7 +14,7 @@
  # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
  # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

- # [kube-master]
+ # [kube_control_plane]
  # node1
  # node2
@@ -32,7 +32,7 @@

  # [k8s-cluster:children]
  # kube-node
- # kube-master
+ # kube_control_plane

  # [gfs-cluster]
  # gfs_node1
@@ -8,7 +8,7 @@
  - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
  - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
  register: gluster_pv
- when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+ when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

  - name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
@@ -19,4 +19,4 @@
  filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
  state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
- when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+ when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -1,5 +1,5 @@
  ---
- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  roles:
  - { role: tear-down }
@@ -3,7 +3,7 @@
  roles:
  - { role: prepare }

- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  tags:
  - "provision"
  roles:
@@ -7,7 +7,7 @@ all:
  vars:
  kubelet_fail_swap_on: false
  children:
- kube-master:
+ kube_control_plane:
  hosts:
  node1:
  etcd:
@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n

  ```commandline
  # Get the controller's IP address.
- CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
+ CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
  CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)

  # Get the hostname of the load balancer.
@@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube-master-profile
|
||||
iam_instance_profile = module.aws-iam.kube_control_plane-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#Add AWS Roles for Kubernetes
|
||||
|
||||
resource "aws_iam_role" "kube-master" {
|
||||
resource "aws_iam_role" "kube_control_plane" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
|
||||
assume_role_policy = <<EOF
|
||||
@@ -40,9 +40,9 @@ EOF
|
||||
|
||||
#Add AWS Policies for Kubernetes
|
||||
|
||||
resource "aws_iam_role_policy" "kube-master" {
|
||||
resource "aws_iam_role_policy" "kube_control_plane" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
role = aws_iam_role.kube-master.id
|
||||
role = aws_iam_role.kube_control_plane.id
|
||||
|
||||
policy = <<EOF
|
||||
{
|
||||
@@ -130,9 +130,9 @@ EOF
|
||||
|
||||
#Create AWS Instance Profiles
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-master" {
|
||||
resource "aws_iam_instance_profile" "kube_control_plane" {
|
||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||
role = aws_iam_role.kube-master.name
|
||||
role = aws_iam_role.kube_control_plane.name
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-worker" {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
output "kube-master-profile" {
|
||||
value = aws_iam_instance_profile.kube-master.name
|
||||
output "kube_control_plane-profile" {
|
||||
value = aws_iam_instance_profile.kube_control_plane.name
|
||||
}
|
||||
|
||||
output "kube-worker-profile" {
|
||||
|
||||
@@ -7,7 +7,7 @@ ${public_ip_address_bastion}
  [bastion]
  ${public_ip_address_bastion}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

@@ -21,7 +21,7 @@ ${list_etcd}

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane

  [k8s-cluster:vars]
@@ -2,10 +2,10 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

- [kube-master:vars]
+ [kube_control_plane:vars]
  supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

  [etcd]
@@ -15,5 +15,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
  done

  echo ""
- echo "[kube-master]"
+ echo "[kube_control_plane]"
  for name in "${MASTER_NAMES[@]}"; do
  echo "${name}"
  done

  echo ""
- echo "[kube-master:vars]"
+ echo "[kube_control_plane:vars]"
  echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
  echo ""
  echo "[etcd]"
@@ -72,5 +72,5 @@ done

  echo ""
  echo "[k8s-cluster:children]"
- echo "kube-master"
+ echo "kube_control_plane"
  echo "kube-node"
@@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
@@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
depends_on = var.network_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
|
||||
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_master_no_etcd" {
|
||||
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
|
||||
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_etcd" {
|
||||
|
||||
@@ -3,7 +3,7 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

  [etcd]
@@ -13,5 +13,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -3,7 +3,7 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

  [etcd]
@@ -13,5 +13,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -1,30 +1,30 @@
  ---
  - import_tasks: sync_kube_master_certs.yml
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']

  - import_tasks: sync_kube_node_certs.yml
  when: inventory_hostname in groups['k8s-cluster']

- # Issue admin certs to kube-master hosts
+ # Issue admin certs to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "admin"
  issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_path: "{{ item }}"
- issue_cert_role: kube-master
+ issue_cert_role: kube_control_plane
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_admin_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']

  - name: gen_certs_vault | Set fact about certificate alt names
  set_fact:
  kube_cert_alt_names: >-
  {{
- groups['kube-master'] +
+ groups['kube_control_plane'] +
  ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
  ['localhost']
  }}
@@ -36,18 +36,18 @@
  when: loadbalancer_apiserver is defined
  run_once: true

- # Issue master components certs to kube-master hosts
+ # Issue master components certs to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "kubernetes"
  issue_cert_alt_names: "{{ kube_cert_alt_names }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_run_once: true
  issue_cert_ip_sans: >-
  [
- {%- for host in groups['kube-master'] -%}
+ {%- for host in groups['kube_control_plane'] -%}
  "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
  {%- if hostvars[host]['ip'] is defined -%}
  "{{ hostvars[host]['ip'] }}",
@@ -61,11 +61,11 @@
  "127.0.0.1","::1","{{ kube_apiserver_ip }}"
  ]
  issue_cert_path: "{{ item }}"
- issue_cert_role: kube-master
+ issue_cert_role: kube_control_plane
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_master_components_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']
  notify: set secret_changed

  # Issue node certs to k8s-cluster nodes
@@ -100,7 +100,7 @@
  with_items: "{{ kube_proxy_certs_needed|d([]) }}"
  when: inventory_hostname in groups['k8s-cluster']

- # Issue front proxy cert to kube-master hosts
+ # Issue front proxy cert to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "front-proxy-client"
@@ -109,10 +109,10 @@
  issue_cert_alt_names: "{{ kube_cert_alt_names }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_ip_sans: >-
  [
- {%- for host in groups['kube-master'] -%}
+ {%- for host in groups['kube_control_plane'] -%}
  "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
  {%- if hostvars[host]['ip'] is defined -%}
  "{{ hostvars[host]['ip'] }}",
@@ -130,5 +130,5 @@
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']
  notify: set secret_changed
@@ -29,7 +29,7 @@
  sync_file: "{{ item }}"
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_is_cert: true
  sync_file_owner: kube
  with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
@@ -49,7 +49,7 @@
  sync_file: front-proxy-ca.pem
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: kube

  - name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
@@ -61,7 +61,7 @@
  sync_file: "{{ item }}"
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_is_cert: true
  sync_file_owner: kube
  with_items: ["front-proxy-client.pem"]
@@ -81,7 +81,7 @@
  sync_file: ca.pem
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: kube

  - name: sync_kube_master_certs | Unset sync_file_results after ca.pem
@@ -14,7 +14,7 @@
  sync_file_owner: kube
  with_items: "{{ kube_node_cert_list|default([]) }}"

- - name: sync_kube_node_certs | Set facts for kube-master sync_file results
+ - name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
  set_fact:
  kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
@@ -166,16 +166,16 @@ vault_pki_mounts:
  description: "Kubernetes Root CA"
  cert_dir: "{{ kube_cert_dir }}"
  roles:
- - name: kube-master
- group: kube-master
- password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
+ - name: kube_control_plane
+ group: kube_control_plane
+ password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
  policy_rules: default
  role_options:
  allow_any_name: true
  enforce_hostnames: false
  organization: "system:masters"
  - name: front-proxy-client
- group: kube-master
+ group: kube_control_plane
  password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
  policy_rules: default
  role_options:
@@ -51,7 +51,7 @@
  gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
  gen_ca_vault_headers: "{{ vault_headers }}"
  gen_ca_vault_options: "{{ vault_ca_options.vault }}"
- gen_ca_copy_group: "kube-master"
+ gen_ca_copy_group: "kube_control_plane"
  when: >-
  inventory_hostname in groups.vault
  and not vault_cluster_is_initialized
@@ -21,7 +21,7 @@
  vars:
  sync_file: "ca.pem"
  sync_file_dir: "{{ vault_cert_dir }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: vault
  sync_file_group: root
  sync_file_is_cert: false
@@ -35,7 +35,7 @@
  gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
  gen_ca_vault_headers: "{{ vault_headers }}"
  gen_ca_vault_options: "{{ vault_ca_options.kube }}"
- gen_ca_copy_group: "kube-master"
+ gen_ca_copy_group: "kube_control_plane"
  when: inventory_hostname in groups.vault

  - include_tasks: ../shared/auth_backend.yml