Mirror of https://github.com/kubernetes-sigs/kubespray.git
Rename ansible groups to use _ instead of - (#7552)

* rename ansible groups to use _ instead of -

  k8s-cluster -> k8s_cluster
  kube-node   -> kube_node
  calico-rr   -> calico_rr
  no-floating -> no_floating

  Note: the kube-node and k8s-cluster groups in upgrade CI
  need clean-up after v2.16 is tagged

* ensure old groups are mapped to the new ones
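To make the second bullet concrete: a minimal Python sketch of the backward-compatibility idea, not code from this commit — treat each legacy dash-style name as an alias group whose only child is the renamed underscore-style group, so an inventory that still references the old name resolves to the same hosts. The RENAMED_GROUPS table and add_legacy_aliases helper are hypothetical names for illustration.

# Hypothetical sketch (not from this commit): keep old dash-style group
# names resolvable by emitting each one as a parent whose only child is
# the renamed underscore-style group, in a JSON-style inventory dict.
RENAMED_GROUPS = {
    'k8s-cluster': 'k8s_cluster',
    'kube-node': 'kube_node',
    'calico-rr': 'calico_rr',
    'no-floating': 'no_floating',
}

def add_legacy_aliases(inventory):
    """Alias every legacy group name onto its renamed counterpart."""
    for old, new in RENAMED_GROUPS.items():
        if new in inventory:
            inventory[old] = {'children': [new]}
    return inventory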
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }

     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube_control_plane", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube_node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
         hosts[group].append(dns_name)
         hosts['_meta']['hostvars'][dns_name] = ansible_host

-    hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
+    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))

 SearchEC2Tags()
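For orientation, here is a minimal sketch of the JSON shape the updated AWS inventory script prints once both hunks apply; the host names and addresses are invented for illustration, but the group layout matches the diff above.

import json

# Invented example output: group names now use underscores, and k8s_cluster
# is a parent of kube_control_plane and kube_node, as in the hunk above.
hosts = {
    '_meta': {'hostvars': {'node1': {'ansible_host': '10.0.0.1'},
                           'node2': {'ansible_host': '10.0.0.2'}}},
    'kube_control_plane': ['node1'],
    'kube_node': ['node2'],
    'etcd': ['node1'],
    'k8s_cluster': {'children': ['kube_control_plane', 'kube_node']},
}
print(json.dumps(hosts, sort_keys=True, indent=2))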
@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_roles_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane

@@ -61,7 +61,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube-node"
+        "roles": "kube_node"
       },
       "apiVersion": "{{apiVersion}}",
       "properties": {
@@ -112,4 +112,4 @@
     } {% if not loop.last %},{% endif %}
   {% endfor %}
   ]
 }
 }
@@ -46,7 +46,7 @@ test_distro() {
   pass_or_fail "$prefix: netcheck" || return 1
 }

-NODES=($(egrep ^kube-node hosts))
+NODES=($(egrep ^kube_node hosts))
 NETCHECKER_HOST=localhost

 : ${OUTPUT_DIR:=./out}
@@ -44,8 +44,8 @@ import re
 import subprocess
 import sys

-ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
-         'calico-rr']
+ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+         'calico_rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                       'load']
@@ -269,7 +269,7 @@ class KubesprayInventory(object):

     def purge_invalid_hosts(self, hostnames, protected_names=[]):
         for role in self.yaml_config['all']['children']:
-            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                 all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                 for host in all_hosts.keys():
                     if host not in hostnames and host not in protected_names:
@@ -290,7 +290,7 @@ class KubesprayInventory(object):
             if self.yaml_config['all']['hosts'] is None:
                 self.yaml_config['all']['hosts'] = {host: None}
             self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s-cluster:children':
+        elif group != 'k8s_cluster:children':
             if self.yaml_config['all']['children'][group]['hosts'] is None:
                 self.yaml_config['all']['children'][group]['hosts'] = {
                     host: None}
@@ -307,37 +307,37 @@ class KubesprayInventory(object):

     def set_k8s_cluster(self):
         k8s_cluster = {'children': {'kube_control_plane': None,
-                                    'kube-node': None}}
-        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
+                                    'kube_node': None}}
+        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

     def set_calico_rr(self, hosts):
         for host in hosts:
             if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
-                self.debug("Not adding {0} to calico-rr group because it "
+                self.debug("Not adding {0} to calico_rr group because it "
                            "conflicts with kube_control_plane "
                            "group".format(host))
                 continue
-            if host in self.yaml_config['all']['children']['kube-node']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-node group".format(host))
+            if host in self.yaml_config['all']['children']['kube_node']:
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_node group".format(host))
                 continue
-            self.add_host_to_group('calico-rr', host)
+            self.add_host_to_group('calico_rr', host)

     def set_kube_node(self, hosts):
         for host in hosts:
             if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                 if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                 if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in "
                                "kube_control_plane group.".format(host))
                     continue
-            self.add_host_to_group('kube-node', host)
+            self.add_host_to_group('kube_node', host)

     def set_etcd(self, hosts):
         for host in hosts:
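The placement rule in set_kube_node() above can be restated as a small predicate. A hedged sketch follows; the threshold values are assumptions for illustration, not taken from this diff.

# Sketch of the rule set_kube_node() applies above: beyond SCALE_THRESHOLD
# total hosts, etcd members stay out of kube_node; beyond
# MASSIVE_SCALE_THRESHOLD, control-plane members stay out as well.
SCALE_THRESHOLD = 50           # assumed value for illustration
MASSIVE_SCALE_THRESHOLD = 200  # assumed value for illustration

def belongs_in_kube_node(host, total_hosts, etcd_hosts, control_plane_hosts):
    if total_hosts >= SCALE_THRESHOLD and host in etcd_hosts:
        return False
    if total_hosts >= MASSIVE_SCALE_THRESHOLD and host in control_plane_hosts:
        return False
    return True

# Example: with 250 hosts, a control-plane member is not also made a node.
assert not belongs_in_kube_node('cp1', 250, {'e1'}, {'cp1'})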
@@ -241,8 +241,8 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['hosts'].get(host), opt)

     def test_set_k8s_cluster(self):
-        group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube_control_plane']
+        group = 'k8s_cluster'
+        expected_hosts = ['kube_node', 'kube_control_plane']

         self.inv.set_k8s_cluster()
         for host in expected_hosts:
@@ -251,7 +251,7 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['children'][group]['children'])

     def test_set_kube_node(self):
-        group = 'kube-node'
+        group = 'kube_node'
         host = 'node1'

         self.inv.set_kube_node([host])
@@ -280,7 +280,7 @@ class TestInventory(unittest.TestCase):
         for h in range(3):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

     def test_scale_scenario_two(self):
         num_nodes = 500
@@ -296,7 +296,7 @@ class TestInventory(unittest.TestCase):
         for h in range(5):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

     def test_range2ips_range(self):
         changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@@ -15,7 +15,7 @@
   roles:
     - { role: glusterfs/server }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
@@ -23,15 +23,15 @@
 # node2
 # node3

-# [kube-node]
+# [kube_node]
 # node2
 # node3
 # node4
 # node5
 # node6

-# [k8s-cluster:children]
-# kube-node
+# [k8s_cluster:children]
+# kube_node
 # kube_control_plane

 # [gfs-cluster]
@@ -3,7 +3,7 @@ all:
     heketi_admin_key: "11elfeinhundertundelf"
     heketi_user_key: "!!einseinseins"
   children:
-    k8s-cluster:
+    k8s_cluster:
       vars:
         kubelet_fail_swap_on: false
       children:
@@ -13,7 +13,7 @@ all:
         etcd:
           hosts:
             node2:
-        kube-node:
+        kube_node:
           hosts: &kube_nodes
             node1:
             node2:
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
 %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
 %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/
@@ -11,7 +11,7 @@ ${public_ip_address_bastion}
 ${list_master}


-[kube-node]
+[kube_node]
 ${list_node}

@@ -19,10 +19,10 @@ ${list_node}
 ${list_etcd}


-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane


-[k8s-cluster:vars]
+[k8s_cluster:vars]
 ${elb_api_fqdn}
@@ -11,9 +11,9 @@ supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
@@ -65,12 +65,12 @@ for name in "${MASTER_NAMES[@]}"; do
 done

 echo ""
-echo "[kube-node]"
+echo "[kube_node]"
 for name in "${WORKER_NAMES[@]}"; do
   echo "${name}"
 done

 echo ""
-echo "[k8s-cluster:children]"
+echo "[k8s_cluster:children]"
 echo "kube_control_plane"
-echo "kube-node"
+echo "kube_node"
@@ -263,8 +263,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
-|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
-|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
 |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
@@ -421,7 +421,7 @@ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```

 if you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
 be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
@@ -546,7 +546,7 @@ bin_dir: /opt/bin
 cloud_provider: openstack
 ```

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:

 - Set variable **kube_network_plugin** to your desired networking plugin.
   - **flannel** works out-of-the-box
@@ -204,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
   }
 }
@@ -245,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
@@ -292,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
@@ -337,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,no-floating"
+    kubespray_groups = "etcd,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -462,13 +462,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
@@ -507,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -548,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
+    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
   }
 }
@@ -593,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user_gfs
-    kubespray_groups = "gfs-cluster,network-storage,no-floating"
+    kubespray_groups = "gfs-cluster,network-storage,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -177,12 +177,12 @@ variable "external_net" {
 }

 variable "supplementary_master_groups" {
-  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  description = "supplementary kubespray ansible groups for masters, such kube_node"
   default = ""
 }

 variable "supplementary_node_groups" {
-  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress"
   default = ""
 }
@@ -108,7 +108,7 @@ While the defaults in variables.tf will successfully deploy a cluster, it is rec
 Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
 `kubeconfig_localhost: true` in the Kubespray configuration.

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`:
 `\# kubeconfig_localhost: false`
 becomes:
 `kubeconfig_localhost: true`
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }

 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }

 resource "packet_device" "k8s_etcd" {
@@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }
@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node