refact ip stack (#11953)

Boris
2025-02-11 14:37:58 +03:00
committed by GitHub
parent c557adf911
commit a51e7dd07d
64 changed files with 470 additions and 208 deletions


@@ -36,11 +36,21 @@
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main
vagrant_ubuntu20-calico-dual-stack:
vagrant_ubuntu24-calico-dual-stack:
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
vagrant_ubuntu24-calico-ipv6only-stack:
stage: deploy-extended
extends: .vagrant
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel:
stage: deploy-part1

Vagrantfile

@@ -210,14 +210,20 @@ Vagrant.configure("2") do |config|
end
ip = "#{$subnet}.#{i+100}"
ip6 = "#{$subnet_ipv6}::#{i+100}"
node.vm.network :private_network,
:ip => ip,
:libvirt__guest_ipv6 => 'yes',
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
:libvirt__ipv6_address => ip6,
:libvirt__ipv6_prefix => "64",
:libvirt__forward_mode => "none",
:libvirt__dhcp_enabled => false
# libvirt__ipv6_address does not work as intended: the address is obtained with the desired prefix, but auto-generated (like fd3c:b398:698:756:5054:ff:fe48:c61e/64)
# add a default route so ansible_default_ipv6 can be detected
# TODO: fix libvirt__ipv6 or use $subnet in shell
config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"
# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"


@@ -273,6 +273,7 @@ def openstack_host(resource, module_name):
'access_ip_v4': raw_attrs['access_ip_v4'],
'access_ip_v6': raw_attrs['access_ip_v6'],
'access_ip': raw_attrs['access_ip_v4'],
'access_ip6': raw_attrs['access_ip_v6'],
'ip': raw_attrs['network.0.fixed_ip_v4'],
'flavor': parse_dict(raw_attrs, 'flavor',
sep='_'),


@@ -41,8 +41,12 @@ Some variables of note include:
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
and access_ip are undefined
* *ip6* - IPv6 address to use for binding services. (host var)
If *enable_dual_stack_networks* is set to ``true`` and *ip6* is defined,
If *ipv6_stack* (which deprecates *enable_dual_stack_networks*) is set to ``true`` and *ip6* is defined,
kubelet's ``--node-ip`` and node's ``InternalIP`` will be the combination of *ip* and *ip6*.
The same applies to the IPv6-only scheme.
* *access_ip6* - similar to ``access_ip``, but for IPv6
* *ansible_default_ipv6.address* - Not Kubespray-specific, but it is used if ip6
and access_ip6 are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
address instead of localhost for kube_control_planes and kube_control_plane[0] for
kube_nodes. See more details in the
@@ -52,6 +56,20 @@ Some variables of note include:
`loadbalancer_apiserver`. See more details in the
[HA guide](/docs/operations/ha-mode.md).
## Special network variables
These variables help avoid a large number of if/else constructs throughout the code associated with enabling the different network stacks.
These variables are used in all templates; a hedged example follows the list below.
By default, only ipv4_stack is enabled, so IPv4 is given priority in dual-stack mode.
Don't change these variables unless you understand what you're doing.
* *main_access_ip* - equal to ``access_ip`` when ipv4_stack is enabled (even in the dual-stack case),
and ``access_ip6`` for IPv6-only clusters
* *main_ip* - equal to ``ip`` when ipv4_stack is enabled (even in the dual-stack case),
and ``ip6`` for IPv6-only clusters
* *main_access_ips* - list of ``access_ip`` and ``access_ip6`` for dual stack, and the one corresponding variable for single stack
* *main_ips* - list of ``ip`` and ``ip6`` for dual stack, and the one corresponding variable for single stack
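For illustration, a hedged sketch of how these variables resolve on a dual-stack host; the host_vars names are real, but the addresses are invented for the example:

```yaml
# Hypothetical host_vars for a dual-stack node (addresses are examples only)
ip: 10.0.0.11
ip6: fd85:ee78:d8a6:8607::11
access_ip: 192.0.2.11
access_ip6: 2001:db8::11

# Derived values (IPv4 first, because ipv4_stack takes priority):
#   main_ip:         10.0.0.11
#   main_access_ip:  192.0.2.11
#   main_ips:        both node addresses, "10.0.0.11,fd85:ee78:d8a6:8607::11" when joined with ","
#   main_access_ips: both access addresses, IPv4 first
```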
## Cluster variables
Kubernetes needs some parameters in order to get deployed. These are the
@@ -83,12 +101,18 @@ following default cluster parameters:
(assertion not applicable to calico which doesn't use this as a hard limit, see
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
* *kube_service_subnets* - All service subnets, separated by commas (default is a mix of ``kube_service_addresses`` and ``kube_service_addresses_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
for example ``10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
It is not recommended to change this variable directly.
* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
* *kube_pods_subnets* - All pods subnets, separated by commas (default is a mix of ``kube_pods_subnet`` and ``kube_pods_subnet_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
for example ``10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
It is not recommended to change this variable directly.
* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. The remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube_nodes can be in the cluster.
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
@@ -152,9 +176,14 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick another values for ``kube_service_addresses``
and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
## Enabling Dual Stack (IPV4 + IPV6) networking
## Enabling Dual Stack (IPV4 + IPV6) or IPV6 only networking
If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
The IPv4 stack is enabled by *ipv4_stack*, which is set to ``true`` by default.
The IPv6 stack is enabled by *ipv6_stack*, which is set to ``false`` by default.
This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
Set both variables to ``true`` for Dual Stack mode.
IPv4 has higher priority in Dual Stack mode (e.g. in the `main_ip`, `main_access_ip`, and related variables).
You can also build IPv6-only clusters by setting *ipv4_stack* to ``false``; the sketch below shows all three modes.
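A minimal group_vars sketch of the three stack modes (variable names from this change; pick one mode):

```yaml
# IPv4-only (the default)
ipv4_stack: true
ipv6_stack: false

# Dual stack: enable both
# ipv4_stack: true
# ipv6_stack: true

# IPv6-only: disable the IPv4 stack
# ipv4_stack: false
# ipv6_stack: true
```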
## DNS variables


@@ -97,27 +97,24 @@ kube_pods_subnet: 10.233.64.0/18
# - kubelet_max_pods: 110
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This is only used if ipv6_stack is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This is only used if ipv6_stack is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This is only used if ipv6_stack is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
# Kube-proxy proxyMode configuration.
@@ -215,8 +212,8 @@ resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
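The ``split(',') | first`` chains above always derive these service IPs from the first (primary-stack) subnet. A hedged walk-through with the default subnets, runnable as a debug task:

```yaml
- debug:
    msg: "{{ '10.233.0.0/18' | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
# -> "10.233.0.1" (kube_apiserver_ip); indexes 3 and 4 give 10.233.0.3 and 10.233.0.4
# (the skydns servers). For IPv6-only the first subnet is fd85:ee78:d8a6:8607::1000/116,
# so index 1 yields fd85:ee78:d8a6:8607::1001.
```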
## Container runtime


@@ -11,7 +11,7 @@ calico_cni_name: k8s-pod-network
# Enables Internet connectivity from containers
# nat_outgoing: true
# nat_outgoing_ipv6: false
# nat_outgoing_ipv6: true
# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true


@@ -122,7 +122,7 @@ enable_cdi: false
# For containerd tracing configuration please check out the official documentation:
# https://github.com/containerd/containerd/blob/main/docs/tracing.md
containerd_tracing_enabled: false
containerd_tracing_endpoint: "0.0.0.0:4317"
containerd_tracing_endpoint: "[::]:4317"
containerd_tracing_protocol: "grpc"
containerd_tracing_sampling_ratio: 1.0
containerd_tracing_service_name: "containerd"


@@ -7,7 +7,7 @@ Requires=cri-dockerd.socket
[Service]
Type=notify
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %}
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnets }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if ipv6_stack %}--ipv6-dual-stack=True{% endif %}
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0


@@ -24,7 +24,7 @@
- name: Wait for etcd up
uri:
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@@ -39,7 +39,7 @@
- name: Wait for etcd-events up
uri:
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
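``ansible.utils.ipwrap`` is what keeps these URLs valid when etcd_address is an IPv6 literal: it brackets IPv6 addresses and passes IPv4 addresses (and hostnames) through unchanged. A small sketch with invented addresses:

```yaml
- debug:
    msg:
      - "{{ 'fd85:ee78:d8a6:8607::11' | ansible.utils.ipwrap }}"  # -> "[fd85:ee78:d8a6:8607::11]"
      - "{{ '10.0.0.11' | ansible.utils.ipwrap }}"                # -> "10.0.0.11"
# so the health URL becomes https://[fd85:ee78:d8a6:8607::11]:2379/health on IPv6 hosts
```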


@@ -145,7 +145,7 @@
ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
- name: Configure | Check if member is in etcd cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
register: etcd_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
@@ -163,7 +163,7 @@
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
- name: Configure | Check if member is in etcd-events cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
register: etcd_events_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false


@@ -19,7 +19,7 @@
etcd_events_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2382,
{{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2382",
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_events_peer_url }}


@@ -20,7 +20,7 @@
etcd_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2380,
{{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2380",
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_peer_url }}


@@ -4,11 +4,11 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %}
ETCD_METRICS={{ etcd_metrics }}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2383,https://127.0.0.1:2383
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2383,https://127.0.0.1:2383
ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2382
ETCD_NAME={{ etcd_member_name }}-events
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}


@@ -8,13 +8,13 @@ ETCD_METRICS={{ etcd_metrics }}
{% if etcd_listen_metrics_urls is defined %}
ETCD_LISTEN_METRICS_URLS={{ etcd_listen_metrics_urls }}
{% elif etcd_metrics_port is defined %}
ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address | ansible.utils.ipwrap }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
{% endif %}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2379,https://127.0.0.1:2379
ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}


@@ -42,9 +42,16 @@ DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
{% if hostvars[host]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ increment(counter, 'ip') }}
{% if hostvars[host]['access_ip6'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip6'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if ipv6_stack %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip6'] | default(hostvars[host]['fallback_ip6']) }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['main_ip'] }}{{ increment(counter, 'ip') }}
{% endfor %}
{% for cert_alt_ip in etcd_cert_alt_ips %}
IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
{% endfor %}
IP.{{ counter["ip"] }} = 127.0.0.1
IP.{{ counter["ip"] }} = 127.0.0.1{{ increment(counter, 'ip') }}
IP.{{ counter["ip"] }} = ::1
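Assuming a single dual-stack etcd host with invented addresses, the rendered SAN block might look like this (annotations added here for clarity; the real file has bare entries, and the exact set depends on which host vars are defined):

```
IP.1 = 192.0.2.11               # access_ip
IP.2 = 2001:db8::11             # access_ip6
IP.3 = fd85:ee78:d8a6:8607::11  # ip6 (or fallback_ip6), because ipv6_stack is true
IP.4 = 10.0.0.11                # main_ip
IP.5 = 127.0.0.1
IP.6 = ::1
```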


@@ -71,7 +71,7 @@
user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
username: "kubernetes-admin-{{ cluster_name }}"
context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + (external_apiserver_address | ansible.utils.ipwrap) + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}"
override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}"
when: kubeconfig_localhost


@@ -4,7 +4,7 @@ kube_kubeadm_scheduler_extra_args: {}
# Associated interface must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_scheduler_bind_address: 0.0.0.0
kube_scheduler_bind_address: "::"
# ClientConnection options (e.g. Burst, QPS) except from kubeconfig.
kube_scheduler_client_conn_extra_opts: {}


@@ -6,7 +6,7 @@ upgrade_cluster_setup: false
# listen on a specific address/interface.
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
kube_apiserver_bind_address: "::"
# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.
@@ -29,7 +29,7 @@ kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
# Associated interfaces must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_controller_manager_bind_address: 0.0.0.0
kube_controller_manager_bind_address: "::"
# Leader election lease durations and timeouts for controller-manager
kube_controller_manager_leader_elect_lease_duration: 15s
@@ -242,7 +242,7 @@ kubeadm_upgrade_auto_cert_renewal: true
## Enable distributed tracing for kube-apiserver
kube_apiserver_tracing: false
kube_apiserver_tracing_endpoint: 0.0.0.0:4317
kube_apiserver_tracing_endpoint: "[::]:4317"
kube_apiserver_tracing_sampling_rate_per_million: 100
# Enable kubeadm file discovery if anonymous access has been removed

View File

@@ -78,7 +78,7 @@
- name: Control plane | wait for kube-scheduler
vars:
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '::' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10259/healthz
validate_certs: false
@@ -92,7 +92,7 @@
- name: Control plane | wait for kube-controller-manager
vars:
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '::' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10257/healthz
validate_certs: false


@@ -4,7 +4,7 @@
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint | regex_replace('https://', '') }}
{%- endif %}
@@ -43,8 +43,8 @@
- name: Wait for k8s apiserver
wait_for:
host: "{{ kubeadm_discovery_address.split(':')[0] }}"
port: "{{ kubeadm_discovery_address.split(':')[1] }}"
host: "{{ kubeadm_discovery_address | regex_replace('\\]?:\\d+$', '') | regex_replace('^\\[', '') }}"
port: "{{ kubeadm_discovery_address.split(':')[-1] }}"
timeout: 180
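A hedged walk-through of the host/port split, assuming the discovery address is an IPv6 endpoint:

```yaml
# kubeadm_discovery_address: "[fd85:ee78:d8a6:8607::11]:6443"
#   | regex_replace('\\]?:\\d+$', '') -> "[fd85:ee78:d8a6:8607::11"
#   | regex_replace('^\\[', '')       -> "fd85:ee78:d8a6:8607::11"   (host)
#   .split(':')[-1]                   -> "6443"                      (port)
# A plain "10.0.0.11:6443" comes through the same filters as "10.0.0.11" and "6443".
```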


@@ -35,12 +35,13 @@
- "{{ kube_apiserver_ip }}"
- "localhost"
- "127.0.0.1"
- "::1"
sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"


@@ -1,7 +1,7 @@
---
- name: Kubeadm | Check api is up
uri:
url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
url: "https://{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: ('kube_control_plane' in group_names)
register: _result


@@ -7,7 +7,7 @@ bootstrapTokens:
ttl: "24h"
{% endif %}
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
advertiseAddress: "{{ kube_apiserver_address }}"
bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
@@ -41,7 +41,7 @@ etcd:
external:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
- "{{ endpoint }}"
{% endfor %}
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -94,9 +94,9 @@ dns:
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
serviceSubnet: "{{ kube_service_subnets }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
podSubnet: "{{ kube_pods_subnets }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
@@ -106,9 +106,9 @@ featureGates:
{% endif %}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
{% else %}
controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
@@ -131,7 +131,7 @@ apiServer:
{% else %}
authorization-mode: {{ authorization_modes | join(',') }}
{% endif %}
bind-address: {{ kube_apiserver_bind_address }}
bind-address: "{{ kube_apiserver_bind_address }}"
{% if kube_apiserver_enable_admission_plugins | length > 0 %}
enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
@@ -147,7 +147,7 @@ apiServer:
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
service-cluster-ip-range: "{{ kube_service_subnets }}"
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
@@ -294,7 +294,7 @@ apiServer:
{% endif %}
certSANs:
{% for san in apiserver_sans %}
- "{{ san }}"
- {{ san }}
{% endfor %}
timeoutForControlPlane: 5m0s
controllerManager:
@@ -302,22 +302,22 @@ controllerManager:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
cluster-cidr: "{{ kube_pods_subnets }}"
{% endif %}
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
service-cluster-ip-range: "{{ kube_service_subnets }}"
{% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
allocate-node-cidrs: "false"
{% else %}
{% if enable_dual_stack_networks %}
{% if ipv4_stack %}
node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
{% endif %}
{% if ipv6_stack %}
node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
node-cidr-mask-size: "{{ kube_network_node_prefix }}"
{% endif %}
{% endif %}
profiling: "{{ kube_profiling }}"
terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
bind-address: {{ kube_controller_manager_bind_address }}
bind-address: "{{ kube_controller_manager_bind_address }}"
leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }}
leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }}
{% if kube_controller_feature_gates or kube_feature_gates %}
@@ -350,7 +350,7 @@ controllerManager:
{% endif %}
scheduler:
extraArgs:
bind-address: {{ kube_scheduler_bind_address }}
bind-address: "{{ kube_scheduler_bind_address }}"
config: {{ kube_config_dir }}/kubescheduler-config.yaml
{% if kube_scheduler_feature_gates or kube_feature_gates %}
feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
@@ -384,7 +384,7 @@ scheduler:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: {{ kube_proxy_bind_address }}
bindAddress: "{{ kube_proxy_bind_address }}"
clientConnection:
acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
burst: {{ kube_proxy_client_burst }}
@@ -392,7 +392,7 @@ clientConnection:
kubeconfig: {{ kube_proxy_client_kubeconfig }}
qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
clusterCIDR: "{{ kube_pods_subnets }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
@@ -401,7 +401,7 @@ conntrack:
tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
hostnameOverride: "{{ kube_override_hostname }}"
iptables:
masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -417,7 +417,7 @@ ipvs:
tcpTimeout: {{ kube_proxy_tcp_timeout }}
tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}


@@ -7,7 +7,7 @@ bootstrapTokens:
ttl: "24h"
{% endif %}
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
advertiseAddress: "{{ kube_apiserver_address }}"
bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
@@ -43,7 +43,7 @@ etcd:
external:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
- "{{ endpoint }}"
{% endfor %}
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -106,9 +106,9 @@ dns:
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
serviceSubnet: "{{ kube_service_subnets }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
podSubnet: "{{ kube_pods_subnets }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
@@ -118,9 +118,9 @@ featureGates:
{% endif %}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
{% else %}
controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
@@ -174,7 +174,7 @@ apiServer:
- name: service-node-port-range
value: "{{ kube_apiserver_node_port_range }}"
- name: service-cluster-ip-range
value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
value: "{{ kube_service_subnets }}"
- name: kubelet-preferred-address-types
value: "{{ kubelet_preferred_address_types }}"
- name: profiling
@@ -351,7 +351,7 @@ apiServer:
{% endif %}
certSANs:
{% for san in apiserver_sans %}
- "{{ san }}"
- {{ san }}
{% endfor %}
controllerManager:
extraArgs:
@@ -361,22 +361,21 @@ controllerManager:
value: "{{ kube_controller_node_monitor_period }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
- name: cluster-cidr
value: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
value: "{{ kube_pods_subnets }}"
{% endif %}
- name: service-cluster-ip-range
value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
value: "{{ kube_service_subnets }}"
{% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
- name: allocate-node-cidrs
value: "false"
{% else %}
{% if enable_dual_stack_networks %}
{% if ipv4_stack %}
- name: node-cidr-mask-size-ipv4
value: "{{ kube_network_node_prefix }}"
{% endif %}
{% if ipv6_stack %}
- name: node-cidr-mask-size-ipv6
value: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
- name: node-cidr-mask-size
value: "{{ kube_network_node_prefix }}"
{% endif %}
{% endif %}
- name: profiling
@@ -480,7 +479,7 @@ scheduler:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: {{ kube_proxy_bind_address }}
bindAddress: "{{ kube_proxy_bind_address }}"
clientConnection:
acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
burst: {{ kube_proxy_client_burst }}
@@ -488,7 +487,7 @@ clientConnection:
kubeconfig: {{ kube_proxy_client_kubeconfig }}
qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
clusterCIDR: "{{ kube_pods_subnets }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
@@ -497,7 +496,7 @@ conntrack:
tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
hostnameOverride: "{{ kube_override_hostname }}"
iptables:
masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -513,7 +512,7 @@ ipvs:
tcpTimeout: {{ kube_proxy_tcp_timeout }}
tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}


@@ -9,7 +9,7 @@ discovery:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
apiServerEndpoint: {{ kubeadm_discovery_address }}
apiServerEndpoint: "{{ kubeadm_discovery_address }}"
{% endif %}
token: {{ kubeadm_token }}
unsafeSkipCAVerification: true
@@ -24,7 +24,7 @@ timeouts:
{% endif %}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
advertiseAddress: "{{ kube_apiserver_address }}"
bindPort: {{ kube_apiserver_port }}
certificateKey: {{ kubeadm_certificate_key }}
nodeRegistration:


@@ -4,7 +4,7 @@
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint | replace("https://", "") }}
{%- endif %}


@@ -8,9 +8,9 @@ discovery:
{% else %}
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
apiServerEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
{% else %}
apiServerEndpoint: {{ kubeadm_discovery_address }}
apiServerEndpoint: "{{ kubeadm_discovery_address }}"
{% endif %}
token: {{ kubeadm_token }}
{% if ca_cert_content is defined %}
@@ -32,7 +32,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt
{% if kubeadm_cert_controlplane is defined and kubeadm_cert_controlplane %}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
advertiseAddress: "{{ kube_apiserver_address }}"
bindPort: {{ kube_apiserver_port }}
certificateKey: {{ kubeadm_certificate_key }}
{% endif %}


@@ -1,9 +1,10 @@
---
# advertised host IP for kubelet. This affects network plugin config. Take caution
kubelet_address: "{{ ip | default(fallback_ip) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}"
# add the IPv6 address manually in dual-stack mode, because main_ip prioritizes IPv4 for dual stack
kubelet_address: "{{ main_ips | join(',') }}"
# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces
kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
# bind address for kubelet. Set to :: to listen on all interfaces
kubelet_bind_address: "{{ main_ip | default('::') }}"
# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"
@@ -27,11 +28,12 @@ kubelet_systemd_hardening: false
kubelet_systemd_wants_dependencies: []
# List of secure IPs for kubelet
# don't forget IPv6 addresses for dual stack (because "main_ip" prioritizes IPv4)
kube_node_addresses: >-
{%- for host in (groups['k8s_cluster'] | union(groups['etcd'])) -%}
{{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ ' ' if not loop.last else '' }}
{{ hostvars[host]['main_ips'] | join(' ') }}{{ ' ' if not loop.last else '' }}
{%- endfor -%}
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} {{ kube_node_addresses }}"
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnets | regex_replace(',', ' ') }} {{ kube_node_addresses }}"
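On a dual-stack node with the stock subnets (and invented host addresses), these render roughly as:

```yaml
# kubelet_address: "10.0.0.11,fd85:ee78:d8a6:8607::11"
# kubelet_secure_addresses: "localhost link-local 10.233.64.0/18 fd85:ee78:d8a6:8607::1:0000/112 ..."
#   (the trailing "..." is the main_ips of every k8s_cluster/etcd host)
```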
# Reserve this space for kube resources
# Whether to run kubelet and container-engine daemons in a dedicated cgroup. (Not required for resource reservations).
@@ -190,7 +192,7 @@ conntrack_modules:
## Enable distributed tracing for kubelet
kubelet_tracing: false
kubelet_tracing_endpoint: 0.0.0.0:4317
kubelet_tracing_endpoint: "[::]:4317"
kubelet_tracing_sampling_rate_per_million: 100
# The maximum number of image pulls in parallel. Set it to a integer great than 1 to enable image pulling in parallel.


@@ -27,7 +27,7 @@
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'nginx'
tags:
@@ -36,7 +36,7 @@
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'haproxy'
tags:


@@ -29,10 +29,10 @@ containerLogMaxSize: {{ kubelet_logfiles_max_size }}
containerRuntimeEndpoint : {{ cri_socket }}
maxPods: {{ kubelet_max_pods }}
podPidsLimit: {{ kubelet_pod_pids_limit }}
address: {{ kubelet_bind_address }}
address: "{{ kubelet_bind_address }}"
readOnlyPort: {{ kube_read_only_port }}
healthzPort: {{ kubelet_healthz_port }}
healthzBindAddress: {{ kubelet_healthz_bind_address }}
healthzBindAddress: "{{ kubelet_healthz_bind_address }}"
kubeletCgroups: {{ kubelet_kubelet_cgroups }}
clusterDomain: {{ dns_domain }}
{% if kubelet_protect_kernel_defaults | bool %}
@@ -130,7 +130,7 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
{% endif %}
{% if kubelet_tracing %}
tracing:
endpoint: {{ kubelet_tracing_endpoint }}
endpoint: "{{ kubelet_tracing_endpoint }}"
samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
{% endif %}
maxParallelImagePulls: {{ kubelet_max_parallel_image_pulls }}


@@ -22,7 +22,7 @@ defaults
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
frontend healthz
bind 0.0.0.0:{{ loadbalancer_apiserver_healthcheck_port }}
{% if enable_dual_stack_networks -%}
{% if ipv6_stack -%}
bind :::{{ loadbalancer_apiserver_healthcheck_port }}
{% endif -%}
mode http
@@ -31,7 +31,7 @@ frontend healthz
frontend kube_api_frontend
bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% if enable_dual_stack_networks -%}
{% if ipv6_stack -%}
bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% endif -%}
mode tcp
@@ -45,5 +45,5 @@ backend kube_api_backend
option httpchk GET /healthz
http-check expect status 200
{% for host in groups['kube_control_plane'] -%}
server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }} check check-ssl verify none
server {{ host }} {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }} check check-ssl verify none
{% endfor -%}
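For an IPv6-only cluster, ``ipwrap`` brackets each control-plane address so HAProxy parses the port correctly; a hedged rendering with invented addresses (IPv4 addresses render unbracketed, as before):

```
server node1 [fd85:ee78:d8a6:8607::11]:6443 check check-ssl verify none
server node2 [fd85:ee78:d8a6:8607::12]:6443 check check-ssl verify none
```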


@@ -14,13 +14,13 @@ stream {
upstream kube_apiserver {
least_conn;
{% for host in groups['kube_control_plane'] -%}
server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }};
server {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }};
{% endfor -%}
}
server {
listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% if enable_dual_stack_networks -%}
{% if ipv6_stack -%}
listen [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% endif -%}
proxy_pass kube_apiserver;
@@ -44,7 +44,7 @@ http {
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
server {
listen {{ loadbalancer_apiserver_healthcheck_port }};
{% if enable_dual_stack_networks -%}
{% if ipv6_stack -%}
listen [::]:{{ loadbalancer_apiserver_healthcheck_port }};
{% endif -%}
location /healthz {


@@ -5,7 +5,7 @@ clusters:
- name: local
cluster:
certificate-authority: {{ kube_cert_dir }}/ca.pem
server: {{ kube_apiserver_endpoint }}
server: "{{ kube_apiserver_endpoint }}"
users:
- name: kubelet
user:


@@ -1,7 +1,7 @@
---
- name: Stop if any host not in '--limit' does not have a fact cache
vars:
uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv6', 'value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
excluded_hosts: "{{ groups['k8s_cluster'] | difference(query('inventory_hostnames', ansible_limit)) }}"
assert:
that: uncached_hosts | intersect(excluded_hosts) == []
@@ -105,6 +105,7 @@
- not ignore_assert_errors
- ('k8s_cluster' in group_names)
- kube_network_plugin not in ['calico', 'none']
- ipv4_stack | bool
- name: Stop if ip var does not match local ips
assert:
@@ -125,16 +126,16 @@
{%- endif -%}
state: present
when:
- access_ip is defined
- main_access_ip is defined
- not ignore_assert_errors
- ping_access_ip
- not is_fedora_coreos
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Stop if access_ip is not pingable
command: ping -c1 {{ access_ip }}
command: ping -c1 {{ main_access_ip }}
when:
- access_ip is defined
- main_access_ip is defined
- not ignore_assert_errors
- ping_access_ip
changed_when: false
@@ -179,12 +180,19 @@
- cloud-provider
- facts
- name: Warn if `enable_dual_stack_networks` is set
debug:
msg: "WARNING! => `enable_dual_stack_networks` deprecation. Please switch to using ipv4_stack and ipv6_stack."
when:
- enable_dual_stack_networks is defined
- name: "Check that kube_service_addresses is a network range"
assert:
that:
- kube_service_addresses | ansible.utils.ipaddr('net')
msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
run_once: true
when: ipv4_stack | bool
- name: "Check that kube_pods_subnet is a network range"
assert:
@@ -192,6 +200,7 @@
- kube_pods_subnet | ansible.utils.ipaddr('net')
msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
run_once: true
when: ipv4_stack | bool
- name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
assert:
@@ -199,13 +208,50 @@
- kube_pods_subnet | ansible.utils.ipaddr(kube_service_addresses) | string == 'None'
msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
run_once: true
when: ipv4_stack | bool
- name: "Check that IP range is enough for the nodes"
- name: "Check that ipv4 IP range is enough for the nodes"
assert:
that:
- 2 ** (kube_network_node_prefix - kube_pods_subnet | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
msg: "Not enough IPs are available for the desired node count."
when: kube_network_plugin != 'calico'
msg: "Not enough ipv4 IPs are available for the desired node count."
when:
- ipv4_stack | bool
- kube_network_plugin != 'calico'
run_once: true
- name: "Check that kube_service_addresses_ipv6 is a network range"
assert:
that:
- kube_service_addresses_ipv6 | ansible.utils.ipaddr('net')
msg: "kube_service_addresses_ipv6 = '{{ kube_service_addresses_ipv6 }}' is not a valid network range"
run_once: true
when: ipv6_stack | bool
- name: "Check that kube_pods_subnet_ipv6 is a network range"
assert:
that:
- kube_pods_subnet_ipv6 | ansible.utils.ipaddr('net')
msg: "kube_pods_subnet_ipv6 = '{{ kube_pods_subnet_ipv6 }}' is not a valid network range"
run_once: true
when: ipv6_stack | bool
- name: "Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6"
assert:
that:
- kube_pods_subnet_ipv6 | ansible.utils.ipaddr(kube_service_addresses_ipv6) | string == 'None'
msg: "kube_pods_subnet_ipv6 cannot be the same network segment as kube_service_addresses_ipv6"
run_once: true
when: ipv6_stack | bool
- name: "Check that ipv6 IP range is enough for the nodes"
assert:
that:
- 2 ** (kube_network_node_prefix_ipv6 - kube_pods_subnet_ipv6 | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
msg: "Not enough ipv6 IPs are available for the desired node count."
when:
- ipv6_stack | bool
- kube_network_plugin != 'calico'
run_once: true
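As a worked check of this arithmetic with the stock defaults:

```yaml
# IPv4: 2 ** (kube_network_node_prefix - prefix of 10.233.64.0/18)                  = 2 ** (24 - 18)   = 64 node CIDRs
# IPv6: 2 ** (kube_network_node_prefix_ipv6 - prefix of fd85:...:8607::1:0000/112)  = 2 ** (120 - 112) = 256 node CIDRs
# Each count must be >= groups['k8s_cluster'] | length; Calico is exempt because it
# does not treat the node prefix as a hard limit.
```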
- name: Stop if unsupported options selected


@@ -76,6 +76,7 @@
value: "1"
state: present
reload: true
when: ipv4_stack | bool
- name: Enable ipv6 forwarding
ansible.posix.sysctl:
@@ -84,7 +85,7 @@
value: "1"
state: present
reload: true
when: enable_dual_stack_networks | bool
when: ipv6_stack | bool
- name: Check if we need to set fs.may_detach_mounts
stat:


@@ -2,11 +2,10 @@
- name: Hosts | create hosts list from inventory
set_fact:
etc_hosts_inventory_block: |-
{% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
{%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %}
{% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique %}
{{ hostvars[item]['main_access_ip'] }} {{ hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }}
{% if ipv4_stack and ipv6_stack %}
{{ hostvars[item]['access_ip6'] | default(hostvars[item]['ip6'] | default(hostvars[item]['ansible_default_ipv6']['address'])) }} {{ hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }}
{% endif %}
{% endfor %}
delegate_to: localhost
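Assuming the default ``cluster.local`` domain and invented addresses, the generated block for a dual-stack host ``node1`` would contain one line per stack:

```
192.0.2.11 node1.cluster.local node1
2001:db8::11 node1.cluster.local node1
```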


@@ -135,8 +135,8 @@ resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes DNS service (called skydns for historical reasons)
skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
docker_dns_search_domains:
- 'default.svc.{{ dns_domain }}'
@@ -230,33 +230,39 @@ kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# enable_dual_stack_networks: false # deprecated
# Configure IPv4 Stack networking
ipv4_stack: true
# Configure IPv6 Stack networking
ipv6_stack: "{{ enable_dual_stack_networks | default(false) }}"
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This is only used if ipv6_stack is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This is only used if ipv6_stack is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This is only used if ipv6_stack is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
kube_apiserver_bind_address: "::"
# https
kube_apiserver_port: 6443
@@ -608,9 +614,9 @@ ssl_ca_dirs: |-
# Vars for pointing to kubernetes api endpoints
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ ip | default(hostvars[inventory_hostname]['fallback_ip']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip'])) }}"
kube_apiserver_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
kube_apiserver_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['main_access_ip'] }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
loadbalancer_apiserver_type: "nginx"
# applied if only external loadbalancer_apiserver is defined, otherwise ignored
@@ -621,7 +627,7 @@ kube_apiserver_global_endpoint: |-
{%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
https://localhost:{{ kube_apiserver_port }}
{%- else -%}
https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{%- endif %}
kube_apiserver_endpoint: |-
{% if loadbalancer_apiserver is defined -%}
@@ -629,9 +635,9 @@ kube_apiserver_endpoint: |-
{%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
{%- elif 'kube_control_plane' in group_names -%}
https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
https://{{ kube_apiserver_bind_address | regex_replace('::', '127.0.0.1') | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{%- else -%}
https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{%- endif %}
kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
@@ -643,41 +649,41 @@ etcd_events_cluster_enabled: false
etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
# Vars for pointing to etcd endpoints
etcd_address: "{{ ip | default(fallback_ip) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383"
etcd_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
etcd_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
etcd_events_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
etcd_peer_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2380"
etcd_client_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap }}:2383"
etcd_access_addresses: |-
{% for item in etcd_hosts -%}
https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2379{% if not loop.last %},{% endif %}
https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2379{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_access_addresses_list: |-
[
{% for item in etcd_hosts -%}
'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2383'{% if not loop.last %},{% endif %}
'https://{{ hostvars[item].main_access_ip | ansible.utils.ipwrap }}:2383'{% if not loop.last %},{% endif %}
{%- endfor %}
]
etcd_metrics_addresses: |-
{% for item in etcd_hosts -%}
https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}"
etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}"
# user should set etcd_member_name in inventory/mycluster/hosts.ini
etcd_member_name: |-
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
{% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
{% endfor %}
etcd_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2380{% if not loop.last %},{% endif %}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2380{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2382{% if not loop.last %},{% endif %}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2382{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_heartbeat_interval: "250"


@@ -18,6 +18,38 @@
fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
when: fallback_ip is not defined
- name: Gather ansible_default_ipv6
setup:
gather_subset: '!all,network'
filter: "ansible_default_ipv6"
when: ansible_default_ipv6 is not defined
ignore_unreachable: true
- name: Set fallback_ip6
set_fact:
fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
when: fallback_ip6 is not defined
- name: Set main access ip (access_ip based on ipv4_stack/ipv6_stack options)
set_fact:
main_access_ip: >-
{%- if ipv4_stack -%}
{{ access_ip | default(ip | default(fallback_ip)) }}
{%- else -%}
{{ access_ip6 | default(ip6 | default(fallback_ip6)) }}
{%- endif -%}
- name: Set main ip (ip based on ipv4_stack/ipv6_stack options)
set_fact:
main_ip: "{{ (ip | default(fallback_ip)) if ipv4_stack else (ip6 | default(fallback_ip6)) }}"
- name: Set main access ips (mixed IPs for dual stack)
set_fact:
main_access_ips: ["{{ (main_access_ip + ',' + (access_ip6 | default(ip6 | default(fallback_ip6)))) if (ipv4_stack and ipv6_stack) else main_access_ip }}"]
- name: Set main ips (mixed IPs for dual stack)
set_fact:
main_ips: ["{{ (main_ip + ',' + (ip6 | default(fallback_ip6))) if (ipv4_stack and ipv6_stack) else main_ip }}"]
- name: Set no_proxy
import_tasks: no_proxy.yml
when:
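
Taken together, these facts resolve in a fixed precedence order: the explicit access_ip/access_ip6 host var wins, then ip/ip6, then the fallback derived from ansible_default_ipv4/ansible_default_ipv6. A sketch of what a hypothetical dual-stack host ends up with (ipv4_stack and ipv6_stack both true):

# host vars:   ip: 192.0.2.10    ip6: 2001:db8::10    (no access_ip/access_ip6 set)
# resulting facts:
#   main_ip:          192.0.2.10
#   main_access_ip:   192.0.2.10                    # falls back to ip
#   main_ips:         ["192.0.2.10,2001:db8::10"]   # single comma-joined element
#   main_access_ips:  ["192.0.2.10,2001:db8::10"]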

View File

@@ -13,7 +13,7 @@
{% set cluster_or_control_plane = 'k8s_cluster' %}
{%- endif -%}
{%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }},
{{ hostvars[item]['main_access_ip'] }},
{%- if item != hostvars[item].get('ansible_hostname', '') -%}
{{ hostvars[item]['ansible_hostname'] }},
{{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
@@ -23,7 +23,7 @@
{%- if additional_no_proxy is defined -%}
{{ additional_no_proxy }},
{%- endif -%}
127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }}
127.0.0.1,localhost,{{ kube_service_subnets }},{{ kube_pods_subnets }},svc,svc.{{ dns_domain }}
delegate_to: localhost
connection: local
delegate_facts: true
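
With kubespray's stock dual-stack subnets, this tail of no_proxy would render roughly as follows (a sketch; the host entries prepended above depend on the inventory):

# 127.0.0.1,localhost,10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116,10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112,svc,svc.cluster.local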

View File

@@ -7,3 +7,23 @@ kube_proxy_deployed: "{{ 'addon/kube-proxy' not in kubeadm_init_phases_skip }}"
calico_min_version_required: "v3.19.4"
containerd_min_version_required: "1.3.7"
# mixed kube_service_addresses/kube_service_addresses_ipv6 for a variety of network stacks (dualstack, ipv6only, ipv4only)
kube_service_subnets: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_service_addresses }},{{ kube_service_addresses_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_service_addresses }}
{%- else -%}
{{ kube_service_addresses_ipv6 }}
{%- endif -%}
# mixed kube_pods_subnet/kube_pods_subnet_ipv6 for a variety of network stacks (dualstack, ipv6only, ipv4only)
kube_pods_subnets: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_pods_subnet }},{{ kube_pods_subnet_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_pods_subnet }}
{%- else -%}
{{ kube_pods_subnet_ipv6 }}
{%- endif -%}
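
For reference, the merged values these defaults produce under each stack mode, assuming kubespray's stock subnets (a sketch, not part of the commit):

#   ipv4 only:   kube_service_subnets: "10.233.0.0/18"
#   ipv6 only:   kube_service_subnets: "fd85:ee78:d8a6:8607::1000/116"
#   dual stack:  kube_service_subnets: "10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116"
# kube_pods_subnets merges kube_pods_subnet/kube_pods_subnet_ipv6 the same way.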

View File

@@ -146,12 +146,16 @@
check_mode: false
register: calico
run_once: true
when: ipv4_stack | bool
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_conf"
set_fact:
calico_pool_conf: '{{ calico.stdout | from_json }}'
when: calico.rc == 0 and calico.stdout
when:
- ipv4_stack | bool
- calico is defined
- calico.rc == 0 and calico.stdout
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -164,10 +168,45 @@
- not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
msg: "Your inventory doesn't match the current cluster configuration"
when:
- ipv4_stack | bool
- calico_pool_conf is defined
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Get Calico {{ calico_pool_name }}-ipv6 configuration"
command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }}-ipv6 -o json"
failed_when: false
changed_when: false
check_mode: false
register: calico_ipv6
run_once: true
when: ipv6_stack | bool
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_ipv6_conf"
set_fact:
calico_pool_ipv6_conf: '{{ calico_ipv6.stdout | from_json }}'
when:
- ipv6_stack | bool
- calico_ipv6 is defined
- calico_ipv6.rc == 0 and calico_ipv6.stdout
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check if ipv6 inventory match current cluster configuration"
assert:
that:
- calico_pool_ipv6_conf.spec.blockSize | int == calico_pool_blocksize_ipv6 | int
- calico_pool_ipv6_conf.spec.cidr == (calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6))
- not calico_pool_ipv6_conf.spec.ipipMode is defined or calico_pool_ipv6_conf.spec.ipipMode == calico_ipip_mode_ipv6
- not calico_pool_ipv6_conf.spec.vxlanMode is defined or calico_pool_ipv6_conf.spec.vxlanMode == calico_vxlan_mode_ipv6
msg: "Your ipv6 inventory doesn't match the current cluster configuration"
when:
- ipv6_stack | bool
- calico_pool_ipv6_conf is defined
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if calico_apiserver_enabled"
assert:
that: calico_datastore == "kdd"
@@ -191,7 +230,6 @@
that:
- "calico_ipip_mode_ipv6 in ['Never']"
msg: "Calico doesn't support ipip tunneling for the IPv6"
when:
- enable_dual_stack_networks
when: ipv6_stack | bool
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"

View File

@@ -84,6 +84,7 @@
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv4_stack | bool
- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
assert:
@@ -91,8 +92,9 @@
msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- 'calico_conf.stdout == "0"'
- ipv4_stack | bool
- calico_pool_cidr is defined
- 'calico_conf.stdout == "0"'
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa risky-shell-pipe - grep will exit 1 if no match found
@@ -107,7 +109,7 @@
changed_when: false
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks
- ipv6_stack
- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
assert:
@@ -115,9 +117,9 @@
msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv6_stack | bool
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
- name: Calico | kdd specific configuration
when:
@@ -206,6 +208,7 @@
- name: Calico | Configure Calico IP Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
- ipv4_stack | bool
block:
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
@@ -256,7 +259,7 @@
- name: Calico | Configure Calico IPv6 Pool
when:
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks | bool
- ipv6_stack | bool
block:
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
@@ -350,7 +353,15 @@
{% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
"nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} ,
{% if calico_advertise_cluster_ips | default(false) %}
"serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
"serviceClusterIPs": >-
{%- if ipv4_stack and ipv6_stack-%}
[{"cidr": "{{ kube_service_addresses }}", "cidr": "{{ kube_service_addresses_ipv6 }}"}],
{%- elif ipv6_stack-%}
[{"cidr": "{{ kube_service_addresses_ipv6 }}"}],
{%- else -%}
[{"cidr": "{{ kube_service_addresses }}"}],
{%- endif -%}
{% endif %}
{% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
"serviceExternalIPs": {{ _service_external_ips | default([]) }}
}
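
On a dual-stack cluster with the stock subnets, the dual-stack branch above renders the field as two separate CIDR objects (a sketch):

# "serviceClusterIPs": [{"cidr": "10.233.0.0/18"}, {"cidr": "fd85:ee78:d8a6:8607::1000/116"}],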

View File

@@ -53,13 +53,15 @@ data:
"type": "host-local",
"subnet": "usePodCidr"
},
{% else %}
"ipam": {
"type": "calico-ipam",
{% if enable_dual_stack_networks %}
"assign_ipv6": "true",
{% endif %}
"assign_ipv4": "true"
{% if ipv4_stack %}
"assign_ipv4": "true"{{ ',' if (ipv6_stack and ipv4_stack) }}
{% endif %}
{% if ipv6_stack %}
"assign_ipv6": "true"
{% endif %}
},
{% endif %}
{% if calico_allow_ip_forwarding %}
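
A sketch of the ipam block this template emits on a dual-stack node (both stack flags true):

# "ipam": {
#   "type": "calico-ipam",
#   "assign_ipv4": "true",
#   "assign_ipv6": "true"
# },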

View File

@@ -265,7 +265,7 @@ spec:
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
- name: FELIX_IPV6SUPPORT
value: "{{ enable_dual_stack_networks | default(false) }}"
value: "{{ ipv6_stack | default(false) }}"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{ calico_loglevel }}"
@@ -308,9 +308,18 @@ spec:
- name: IP_AUTODETECTION_METHOD
value: "can-reach=$(NODEIP)"
{% endif %}
{% if ipv4_stack %}
- name: IP
value: "autodetect"
{% if calico_ip6_auto_method is defined and enable_dual_stack_networks %}
{% else %}
- name: IP
value: none
{% endif %}
{% if ipv6_stack %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_ip6_auto_method is defined and ipv6_stack %}
- name: IP6_AUTODETECTION_METHOD
value: "{{ calico_ip6_auto_method }}"
{% endif %}
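
On an IPv6-only node (ipv4_stack false, ipv6_stack true), the branches above render the env entries roughly as (a sketch):

#   - name: IP
#     value: none
#   - name: IP6
#     value: autodetect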
@@ -318,10 +327,6 @@ spec:
- name: FELIX_MTUIFACEPATTERN
value: "{{ calico_felix_mtu_iface_pattern }}"
{% endif %}
{% if enable_dual_stack_networks %}
- name: IP6
value: autodetect
{% endif %}
{% if calico_use_default_route_src_ipaddr | default(false) %}
- name: FELIX_DEVICEROUTESOURCEADDRESS
valueFrom:

View File

@@ -22,7 +22,7 @@ calico_pool_blocksize: 26
# Calico doesn't support ipip tunneling for the IPv6.
calico_ipip_mode_ipv6: Never
calico_vxlan_mode_ipv6: Never
calico_vxlan_mode_ipv6: Always
# add default ipv6 ippool blockSize
calico_pool_blocksize_ipv6: 122

View File

@@ -4,8 +4,8 @@ cilium_min_version_required: "1.10"
cilium_debug: false
cilium_mtu: ""
cilium_enable_ipv4: true
cilium_enable_ipv6: false
cilium_enable_ipv4: "{{ ipv4_stack }}"
cilium_enable_ipv6: "{{ ipv6_stack }}"
# Enable l2 announcement from cilium to replace Metallb Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/
cilium_l2announcements: false

View File

@@ -2,7 +2,7 @@
# Flannel public IP
# The address that flannel should advertise as how to access the system
# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ip)) }}"
# flannel_public_ip: "{{ main_access_ip }}"
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item

View File

@@ -30,12 +30,14 @@ data:
}
net-conf.json: |
{
{% if ipv4_stack %}
"Network": "{{ kube_pods_subnet }}",
"EnableIPv4": true,
{% if enable_dual_stack_networks %}
{% endif %}
{% if ipv6_stack %}
"EnableIPv6": true,
"IPv6Network": "{{ kube_pods_subnet_ipv6 }}",
{% endif %}
{% endif %}
"Backend": {
"Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %},
"VNI": {{ flannel_vxlan_vni }},

View File

@@ -33,7 +33,7 @@ kube_ovn_central_replics: "{{ kube_ovn_central_hosts | length }}"
kube_ovn_controller_replics: "{{ kube_ovn_central_hosts | length }}"
kube_ovn_central_ips: |-
{% for item in kube_ovn_central_hosts -%}
{{ hostvars[item]['ip'] | default(hostvars[item]['fallback_ip']) }}{% if not loop.last %},{% endif %}
{{ hostvars[item]['main_ip'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
kube_ovn_ic_enable: false
@@ -62,6 +62,15 @@ kube_ovn_traffic_mirror: false
kube_ovn_external_address: 8.8.8.8
kube_ovn_external_address_ipv6: 2400:3200::1
kube_ovn_external_address_merged: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_ovn_external_address }},{{ kube_ovn_external_address_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_ovn_external_address }}
{%- else -%}
{{ kube_ovn_external_address_ipv6 }}
{%- endif -%}
kube_ovn_external_dns: alauda.cn
# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0
@@ -74,6 +83,14 @@ kube_ovn_u2o_interconnection: false
# kube_ovn_default_exclude_ips: 10.16.0.1
kube_ovn_node_switch_cidr: 100.64.0.0/16
kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64
kube_ovn_node_switch_cidr_merged: >-
{%- if ipv4_stack and ipv6_stack -%}
{{ kube_ovn_node_switch_cidr }},{{ kube_ovn_node_switch_cidr_ipv6 }}
{%- elif ipv4_stack -%}
{{ kube_ovn_node_switch_cidr }}
{%- else -%}
{{ kube_ovn_node_switch_cidr_ipv6 }}
{%- endif -%}
## vlan config, set default interface name and vlan id
# kube_ovn_default_interface_name: eth0
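
With the defaults above, the merged helpers resolve as follows (dual stack shown; an IPv6-only cluster gets just the *_ipv6 value):

#   kube_ovn_external_address_merged: "8.8.8.8,2400:3200::1"
#   kube_ovn_node_switch_cidr_merged: "100.64.0.0/16,fd00:100:64::/64"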

View File

@@ -240,14 +240,14 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /kube-ovn/start-controller.sh
- --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }}
- --default-cidr={{ kube_pods_subnets }}
- --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }}
- --default-gateway-check={{ kube_ovn_default_gateway_check | string }}
- --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }}
- --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }}
- --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --node-switch-cidr={{ kube_ovn_node_switch_cidr_merged }}
- --service-cluster-ip-range={{ kube_service_subnets }}
- --network-type={{ kube_ovn_network_type }}
- --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
- --default-vlan-id={{ kube_ovn_default_vlan_id }}
@@ -403,7 +403,7 @@ spec:
args:
- --enable-mirror={{ kube_ovn_traffic_mirror | lower }}
- --encap-checksum={{ kube_ovn_encap_checksum | lower }}
- --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
- --service-cluster-ip-range={{ kube_service_subnets }}
- --iface={{ kube_ovn_iface | default('') }}
- --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }}
- --network-type={{ kube_ovn_network_type }}
@@ -588,7 +588,7 @@ spec:
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }}
- --external-address={{ kube_ovn_external_address_merged }}
- --external-dns={{ kube_ovn_external_dns }}
- --logtostderr=false
- --alsologtostderr=true
@@ -837,7 +837,7 @@ spec:
- name: metrics
port: 10661
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -852,7 +852,7 @@ metadata:
labels:
app: kube-ovn-pinger
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -869,7 +869,7 @@ metadata:
labels:
app: kube-ovn-controller
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -886,7 +886,7 @@ metadata:
labels:
app: kube-ovn-cni
spec:
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:

View File

@@ -260,7 +260,7 @@ spec:
port: 6641
targetPort: 6641
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -280,7 +280,7 @@ spec:
port: 6642
targetPort: 6642
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:
@@ -300,7 +300,7 @@ spec:
port: 6643
targetPort: 6643
type: ClusterIP
{% if enable_dual_stack_networks %}
{% if ipv6_stack %}
ipFamilyPolicy: PreferDualStack
{% endif %}
selector:

View File

@@ -1,6 +1,6 @@
apiVersion: v1
kind: Config
clusterCIDR: {{ kube_pods_subnet }}
clusterCIDR: {{ kube_pods_subnets }}
clusters:
- name: cluster
cluster:

View File

@@ -18,7 +18,7 @@ weave_hairpin_mode: true
# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
weave_ipalloc_range: "{{ kube_pods_subnet }}"
weave_ipalloc_range: "{{ kube_pods_subnets }}"
# Set to 0 to disable Network Policy Controller (default is on)
weave_expect_npc: "{{ enable_network_policy }}"

View File

@@ -6,10 +6,10 @@
etcd_servers: >-
{% for host in groups['etcd'] -%}
{% if not loop.last -%}
https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379,
https://{{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:2379,
{%- endif -%}
{%- if loop.last -%}
https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2379
https://{{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:2379
{%- endif -%}
{%- endfor -%}

View File

@@ -7,8 +7,6 @@
changed_when: false
when:
- groups['kube_control_plane'] | length > 0
- ip is not defined
- access_ip is not defined
delegate_to: "{{ groups['kube_control_plane'] | first }}"
- name: Remove etcd member from cluster
@@ -29,7 +27,12 @@
- facts
- name: Remove member from cluster
vars:
node_ip: "{{ ip if ip is defined else (access_ip if access_ip is defined else (k8s_node_ips.stdout | from_json)[0]) }}"
node_ip: >-
{%- if not ipv4_stack -%}
{{ (ip6 if ip6 is defined else (access_ip6 if access_ip6 is defined else (k8s_node_ips.stdout | from_json)[0])) | ansible.utils.ipwrap }}
{%- else -%}
{{ (ip if ip is defined else (access_ip if access_ip is defined else (k8s_node_ips.stdout | from_json)[0])) | ansible.utils.ipwrap }}
{%- endif -%}
command:
argv:
- "{{ bin_dir }}/etcdctl"

View File

@@ -189,7 +189,7 @@
- nat
- mangle
- raw
when: flush_iptables | bool
when: flush_iptables | bool and ipv4_stack
tags:
- iptables
@@ -203,7 +203,7 @@
- nat
- mangle
- raw
when: flush_iptables | bool and enable_dual_stack_networks
when: flush_iptables | bool and ipv6_stack
tags:
- ip6tables

View File

@@ -1,3 +0,0 @@
---
# Kubespray settings
enable_dual_stack_networks: true

View File

@@ -1,4 +1,6 @@
# For CI we are not worried about data persistence across reboot
$os = "ubuntu2404"
$vm_cpus = 2
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting

View File

@@ -0,0 +1,8 @@
---
# Instance settings
cloud_image: ubuntu-2404
mode: default
# Kubespray settings
ipv4_stack: true
ipv6_stack: true

View File

@@ -0,0 +1,9 @@
$os = "ubuntu2404"
$vm_cpus = 2
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$network_plugin = "calico"

View File

@@ -0,0 +1,12 @@
---
# Instance settings
cloud_image: ubuntu-2404
mode: default
# Kubespray settings
ipv4_stack: false
ipv6_stack: true
kube_network_plugin: calico
etcd_deployment_type: kubeadm
kube_proxy_mode: iptables
enable_nodelocaldns: false

View File

@@ -5,7 +5,7 @@
tasks:
- name: Check the API servers are responding
uri:
url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port | default(6443) }}/version"
url: "https://{{ (access_ip if (ipv4_stack | default(true)) else access_ip6) | default(ansible_default_ipv4.address if (ipv4_stack | default(true)) else ansible_default_ipv6.address) | ansible.utils.ipwrap }}:{{ kube_apiserver_port | default(6443) }}/version"
validate_certs: false
status_code: 200
register: apiserver_response

View File

@@ -7,7 +7,7 @@
# TODO: source those from kubespray-defaults instead.
# Needs kubespray-defaults to be decoupled from no-proxy stuff
bin_dir: "/usr/local/bin"
kube_pods_subnet: 10.233.64.0/18
kube_pods_subnet: "{{ 'fd85:ee78:d8a6:8607::1:0000/112' if not (ipv4_stack | default(true)) else '10.233.64.0/18' }}"
tasks:
@@ -115,7 +115,7 @@
| length == 2
- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP }}:8080"
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP | ansible.utils.ipwrap}}:8080"
with_nested:
- "{{ pods }}"
- "{{ pods }}"

View File

@@ -51,7 +51,7 @@
block:
- name: Get netchecker agents
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/agents/"
return_content: true
headers:
Accept: application/json
@@ -64,7 +64,7 @@
- name: Check netchecker status
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/connectivity_check"
return_content: true
headers:
Accept: application/json