Mirror of https://github.com/kubernetes-sigs/kubespray.git
Synced 2026-03-22 17:38:48 +03:00

Compare commits: 424dc33b2f...master (10 commits)

fae47ab9e6
e979e770f2
b1e3816b2f
391b08c645
39b97464be
3c6d368397
03d17fea92
dbb8527560
7acdc4df64
a51773e78f
@@ -57,6 +57,7 @@ pr:
       - ubuntu24-kube-router-svc-proxy
       - ubuntu24-ha-separate-etcd
       - fedora40-flannel-crio-collection-scale
+      - openeuler24-calico

 # This is for flakey test so they don't disrupt the PR worklflow too much.
 # Jobs here MUST have a open issue so we don't lose sight of them
@@ -67,7 +68,6 @@ pr-flakey:
   matrix:
     - TESTCASE:
       - flatcar4081-calico # https://github.com/kubernetes-sigs/kubespray/issues/12309
-      - openeuler24-calico # https://github.com/kubernetes-sigs/kubespray/issues/12877

 # The ubuntu24-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
 ubuntu24-calico-all-in-one:
@@ -119,7 +119,7 @@ Note:
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
   - [calico](https://github.com/projectcalico/calico) 3.30.6
-  - [cilium](https://github.com/cilium/cilium) 1.18.6
+  - [cilium](https://github.com/cilium/cilium) 1.19.1
   - [flannel](https://github.com/flannel-io/flannel) 0.27.3
   - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
   - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
@@ -245,7 +245,7 @@ cilium_operator_extra_volume_mounts:
 ## Choose Cilium version

 ```yml
-cilium_version: "1.18.6"
+cilium_version: "1.19.1"
 ```

 ## Add variable to config
@@ -32,12 +32,12 @@ etcd_metrics_service_labels:
   k8s-app: etcd
   app.kubernetes.io/managed-by: Kubespray
   app: kube-prometheus-stack-kube-etcd
-  release: prometheus-stack
+  release: kube-prometheus-stack
 ```

 The last two labels in the above example allows to scrape the metrics from the
 [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
-chart with the following Helm `values.yaml` :
+chart when it is installed with the release name `kube-prometheus-stack` and the following Helm `values.yaml`:

 ```yaml
 kubeEtcd:
@@ -45,8 +45,22 @@ kubeEtcd:
   enabled: false
 ```

-To fully override metrics exposition urls, define it in the inventory with:
+If your Helm release name is different, adjust the `release` label accordingly.
+
+To fully override metrics exposition URLs, define it in the inventory with:

 ```yaml
 etcd_listen_metrics_urls: "http://0.0.0.0:2381"
 ```
+
+If you choose to expose metrics on specific node IPs (for example `10.141.4.22`, `10.141.4.23`, `10.141.4.24`) in `etcd_listen_metrics_urls`,
+you can configure kube-prometheus-stack to scrape those endpoints directly with:
+
+```yaml
+kubeEtcd:
+  enabled: true
+  endpoints:
+    - 10.141.4.22
+    - 10.141.4.23
+    - 10.141.4.24
+```
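Since `etcd_listen_metrics_urls` is a single URL string handed to etcd, the endpoint-based scrape setup above pairs naturally with a per-node override. A minimal host_vars sketch; the file path and hostname are assumed examples, the port 2381 comes from the snippet above:

```yaml
# inventory/mycluster/host_vars/etcd1.yml (assumed example path and hostname)
# Expose metrics on this node's own IP so kube-prometheus-stack can scrape it
# via the endpoints listed above.
etcd_listen_metrics_urls: "http://10.141.4.22:2381"
```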
@@ -16,6 +16,8 @@
 - name: Gather and compute network facts
   import_role:
     name: network_facts
+  tags:
+    - always
 - name: Gather minimal facts
   setup:
     gather_subset: '!all'
@@ -12,6 +12,10 @@ coreos_locksmithd_disable: false
 # Install epel repo on Centos/RHEL
 epel_enabled: false

+## openEuler specific variables
+# Enable metalink for openEuler repos (auto-selects fastest mirror by location)
+openeuler_metalink_enabled: false
+
 ## Oracle Linux specific variables
 # Install public repo on Oracle Linux
 use_oracle_public_repo: true
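To turn mirror selection on for a cluster, the new toggle can be set in the inventory group variables. A minimal sketch; the group_vars path is an assumed example:

```yaml
# inventory/mycluster/group_vars/all/all.yml (assumed example path)
# Let dnf pick the closest openEuler mirror via metalink
openeuler_metalink_enabled: true
```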
@@ -1,3 +1,43 @@
 ---
-- name: Import Centos boostrap for openEuler
-  import_tasks: centos.yml
+- name: Import CentOS bootstrap for openEuler
+  ansible.builtin.import_tasks: centos.yml
+
+- name: Get existing openEuler repo sections
+  ansible.builtin.shell:
+    cmd: "set -o pipefail && grep '^\\[' /etc/yum.repos.d/openEuler.repo | tr -d '[]'"
+    executable: /bin/bash
+  register: _openeuler_repo_sections
+  changed_when: false
+  failed_when: false
+  check_mode: false
+  become: true
+  when: openeuler_metalink_enabled
+
+- name: Enable metalink for openEuler repos
+  community.general.ini_file:
+    path: /etc/yum.repos.d/openEuler.repo
+    section: "{{ item.key }}"
+    option: metalink
+    value: "{{ item.value }}"
+    no_extra_spaces: true
+    mode: "0644"
+  loop: "{{ _openeuler_metalink_repos | dict2items | selectattr('key', 'in', _openeuler_repo_sections.stdout_lines | default([])) }}"
+  become: true
+  when: openeuler_metalink_enabled
+  register: _openeuler_metalink_result
+  vars:
+    _openeuler_metalink_repos:
+      OS: "https://mirrors.openeuler.org/metalink?repo=$releasever/OS&arch=$basearch"
+      everything: "https://mirrors.openeuler.org/metalink?repo=$releasever/everything&arch=$basearch"
+      EPOL: "https://mirrors.openeuler.org/metalink?repo=$releasever/EPOL/main&arch=$basearch"
+      debuginfo: "https://mirrors.openeuler.org/metalink?repo=$releasever/debuginfo&arch=$basearch"
+      source: "https://mirrors.openeuler.org/metalink?repo=$releasever&arch=source"
+      update: "https://mirrors.openeuler.org/metalink?repo=$releasever/update&arch=$basearch"
+      update-source: "https://mirrors.openeuler.org/metalink?repo=$releasever/update&arch=source"
+
+- name: Clean dnf cache to apply metalink mirror selection
+  ansible.builtin.command: dnf clean all
+  become: true
+  when:
+    - openeuler_metalink_enabled
+    - _openeuler_metalink_result.changed
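The loop expression above only adds `metalink` to repo sections that already exist in `/etc/yum.repos.d/openEuler.repo`. A sketch of how it narrows the repo map, assuming the grep task found only three sections:

```yaml
# Assumed output of the "Get existing openEuler repo sections" task:
_openeuler_repo_sections:
  stdout_lines:
    - OS
    - everything
    - update
# _openeuler_metalink_repos | dict2items | selectattr('key', 'in', ...)
# then yields three loop items, so only [OS], [everything] and [update]
# receive a metalink= entry; EPOL, debuginfo, source and update-source
# are skipped because they are not present in the file.
```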
@@ -45,7 +45,7 @@ data:
         force_tcp
     }
     prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
-    health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }}
+    health {{ nodelocaldns_ip | ansible.utils.ipwrap }}:{{ nodelocaldns_health_port }}
{% if dns_etchosts | default(None) %}
    hosts /etc/coredns/hosts {
        fallthrough
@@ -132,7 +132,7 @@ data:
         force_tcp
     }
     prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
-    health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }}
+    health {{ nodelocaldns_ip | ansible.utils.ipwrap }}:{{ nodelocaldns_second_health_port }}
{% if dns_etchosts | default(None) %}
    hosts /etc/coredns/hosts {
        fallthrough
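The `ansible.utils.ipwrap` filter only affects IPv6 values, wrapping them in brackets so the rendered `host:port` pair stays parseable; IPv4 addresses pass through unchanged. A sketch with illustrative values:

```yaml
# Illustrative values only; nodelocaldns_ip and the health port are set elsewhere.
# IPv4: "169.254.25.10" | ansible.utils.ipwrap -> 169.254.25.10
#   rendered: health 169.254.25.10:9254
# IPv6: "fd00:10:96::a" | ansible.utils.ipwrap -> [fd00:10:96::a]
#   rendered: health [fd00:10:96::a]:9254
```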
@@ -116,7 +116,7 @@ flannel_version: 0.27.3
 flannel_cni_version: 1.7.1-flannel1
 cni_version: "{{ (cni_binary_checksums['amd64'] | dict2items)[0].key }}"

-cilium_version: "1.18.6"
+cilium_version: "1.19.1"
 cilium_cli_version: "{{ (ciliumcli_binary_checksums['amd64'] | dict2items)[0].key }}"
 cilium_enable_hubble: false
roles/network_facts/defaults/main.yml (new file)
@@ -0,0 +1,5 @@
+---
+# Additional string host to inject into NO_PROXY
+additional_no_proxy: ""
+additional_no_proxy_list: "{{ additional_no_proxy | split(',') }}"
+no_proxy_exclude_workers: false
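`additional_no_proxy` is a comma-separated string and `additional_no_proxy_list` is derived from it with `split(',')`, so either form can be supplied in the inventory. A minimal sketch with placeholder hostnames:

```yaml
additional_no_proxy: "registry.example.com,internal.example.net"
# ...which makes additional_no_proxy_list evaluate to:
#   ["registry.example.com", "internal.example.net"]
# The list form can also be set directly, as the CI no_proxy test
# configuration further down in this changeset does.
```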
@@ -1,41 +1,63 @@
 ---
+- name: Set facts variables
+  tags:
+    - always
+  block:
-- name: Gather node IPs
-  setup:
-    gather_subset: '!all,!min,network'
-    filter: "ansible_default_ip*"
-  when: ansible_default_ipv4 is not defined or ansible_default_ipv6 is not defined
-  ignore_unreachable: true
+    - name: Gather node IPs
+      setup:
+        gather_subset: '!all,!min,network'
+        filter: "ansible_default_ip*"
+      when: ansible_default_ipv4 is not defined or ansible_default_ipv6 is not defined
+      ignore_unreachable: true

-- name: Set computed IPs varables
-  vars:
-    fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
-    fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
-    # Set 127.0.0.1 as fallback IP if we do not have host facts for host
-    # ansible_default_ipv4 isn't what you think.
-    _ipv4: "{{ ip | default(fallback_ip) }}"
-    _access_ipv4: "{{ access_ip | default(_ipv4) }}"
-    _ipv6: "{{ ip6 | default(fallback_ip6) }}"
-    _access_ipv6: "{{ access_ip6 | default(_ipv6) }}"
-    _access_ips:
-      - "{{ _access_ipv4 if ipv4_stack }}"
-      - "{{ _access_ipv6 if ipv6_stack }}"
-    _ips:
-      - "{{ _ipv4 if ipv4_stack }}"
-      - "{{ _ipv6 if ipv6_stack }}"
-  set_fact:
-    cacheable: true
-    main_access_ip: "{{ _access_ipv4 if ipv4_stack else _access_ipv6 }}"
-    main_ip: "{{ _ipv4 if ipv4_stack else _ipv6 }}"
-    # Mixed IPs - for dualstack
-    main_access_ips: "{{ _access_ips | select }}"
-    main_ips: "{{ _ips | select }}"
+    - name: Set computed IPs variables
+      vars:
+        fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
+        fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
+        # Set 127.0.0.1 as fallback IP if we do not have host facts for host
+        # ansible_default_ipv4 isn't what you think.
+        _ipv4: "{{ ip | default(fallback_ip) }}"
+        _access_ipv4: "{{ access_ip | default(_ipv4) }}"
+        _ipv6: "{{ ip6 | default(fallback_ip6) }}"
+        _access_ipv6: "{{ access_ip6 | default(_ipv6) }}"
+        _access_ips:
+          - "{{ _access_ipv4 if ipv4_stack }}"
+          - "{{ _access_ipv6 if ipv6_stack }}"
+        _ips:
+          - "{{ _ipv4 if ipv4_stack }}"
+          - "{{ _ipv6 if ipv6_stack }}"
+      set_fact:
+        cacheable: true
+        main_access_ip: "{{ _access_ipv4 if ipv4_stack else _access_ipv6 }}"
+        main_ip: "{{ _ipv4 if ipv4_stack else _ipv6 }}"
+        # Mixed IPs - for dualstack
+        main_access_ips: "{{ _access_ips | select }}"
+        main_ips: "{{ _ips | select }}"

-- name: Set no_proxy
-  import_tasks: no_proxy.yml
-  when:
-    - http_proxy is defined or https_proxy is defined
-    - no_proxy is not defined
+- name: Set no_proxy to all assigned cluster IPs and hostnames
+  when:
+    - http_proxy is defined or https_proxy is defined
+    - no_proxy is not defined
+  vars:
+    groups_with_no_proxy:
+      - kube_control_plane
+      - "{{ '' if no_proxy_exclude_workers else 'kube_node' }}" # TODO: exclude by a boolean in inventory rather than global variable
+      - etcd
+      - calico_rr
+    hosts_with_no_proxy: "{{ groups_with_no_proxy | select | map('extract', groups) | select('defined') | flatten }}"
+    _hostnames: "{{ (hosts_with_no_proxy +
+                     (hosts_with_no_proxy | map('extract', hostvars, morekeys=['ansible_hostname'])
+                      | select('defined')))
+                    | unique }}"
+    no_proxy_prepare:
+      - "{{ apiserver_loadbalancer_domain_name | d('') }}"
+      - "{{ loadbalancer_apiserver.address if loadbalancer_apiserver is defined else '' }}"
+      - "{{ hosts_with_no_proxy | map('extract', hostvars, morekeys=['main_access_ip']) }}"
+      - "{{ _hostnames }}"
+      - "{{ _hostnames | map('regex_replace', '$', '.' + dns_domain ) }}"
+      - "{{ additional_no_proxy_list }}"
+      - 127.0.0.1
+      - localhost
+      - "{{ kube_service_subnets }}"
+      - "{{ kube_pods_subnets }}"
+      - svc
+      - "svc.{{ dns_domain }}"
+  set_fact:
+    no_proxy: "{{ no_proxy_prepare | select | flatten | unique | join(',') }}"
+  run_once: true
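For a small cluster, the resulting `no_proxy` fact is simply the joined, deduplicated `no_proxy_prepare` list. A sketch for a hypothetical two-node cluster without an API-server load balancer; names, IPs and subnets are placeholders:

```yaml
# Hypothetical inputs:
#   node1: main_access_ip 192.168.10.11, ansible_hostname node1
#   node2: main_access_ip 192.168.10.12, ansible_hostname node2
#   dns_domain: cluster.local
#   additional_no_proxy_list: ["registry.example.com"]
# Roughly the resulting fact:
no_proxy: "192.168.10.11,192.168.10.12,node1,node2,node1.cluster.local,node2.cluster.local,registry.example.com,127.0.0.1,localhost,10.233.0.0/18,10.233.64.0/18,svc,svc.cluster.local"
```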
@@ -1,40 +0,0 @@
----
-- name: Set no_proxy to all assigned cluster IPs and hostnames
-  set_fact:
-    # noqa: jinja[spacing]
-    no_proxy_prepare: >-
-      {%- if loadbalancer_apiserver is defined -%}
-      {{ apiserver_loadbalancer_domain_name }},
-      {{ loadbalancer_apiserver.address | default('') }},
-      {%- endif -%}
-      {%- if no_proxy_exclude_workers | default(false) -%}
-      {% set cluster_or_control_plane = 'kube_control_plane' %}
-      {%- else -%}
-      {% set cluster_or_control_plane = 'k8s_cluster' %}
-      {%- endif -%}
-      {%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
-      {{ hostvars[item]['main_access_ip'] }},
-      {%- if item != hostvars[item].get('ansible_hostname', '') -%}
-      {{ hostvars[item]['ansible_hostname'] }},
-      {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
-      {%- endif -%}
-      {{ item }},{{ item }}.{{ dns_domain }},
-      {%- endfor -%}
-      {%- if additional_no_proxy is defined -%}
-      {{ additional_no_proxy }},
-      {%- endif -%}
-      127.0.0.1,localhost,{{ kube_service_subnets }},{{ kube_pods_subnets }},svc,svc.{{ dns_domain }}
-  delegate_to: localhost
-  connection: local
-  delegate_facts: true
-  become: false
-  run_once: true
-
-- name: Populates no_proxy to all hosts
-  set_fact:
-    no_proxy: "{{ hostvars.localhost.no_proxy_prepare | select }}"
-    # noqa: jinja[spacing]
-    proxy_env: "{{ proxy_env | combine({
-      'no_proxy': hostvars.localhost.no_proxy_prepare,
-      'NO_PROXY': hostvars.localhost.no_proxy_prepare
-    }) }}"
@@ -177,6 +177,9 @@ rules:
       - blockaffinities
       - caliconodestatuses
       - tiers
+      - stagednetworkpolicies
+      - stagedglobalnetworkpolicies
+      - stagedkubernetesnetworkpolicies
     verbs:
       - get
       - list
@@ -215,3 +215,17 @@ rules:
       - calico-cni-plugin
     verbs:
       - create
+{% if calico_version is version('3.29.0', '>=') %}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-tier-getter
+rules:
+  - apiGroups:
+      - "projectcalico.org"
+    resources:
+      - "tiers"
+    verbs:
+      - "get"
+{% endif %}
@@ -26,3 +26,18 @@ subjects:
   - kind: ServiceAccount
     name: calico-cni-plugin
     namespace: kube-system
+{% if calico_version is version('3.29.0', '>=') %}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-tier-getter
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-tier-getter
+subjects:
+  - apiGroup: rbac.authorization.k8s.io
+    kind: User
+    name: system:kube-controller-manager
+{% endif %}
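The Jinja guard above keys off the Calico version; with the 3.30.6 pinned elsewhere in this changeset, the extra RBAC objects are rendered. Worked evaluation:

```yaml
# Illustrative evaluation of the template guard:
#   "3.30.6" is version("3.29.0", ">=")  ->  true
# so the calico-tier-getter ClusterRole and ClusterRoleBinding are emitted;
# for Calico releases older than 3.29.0 the block is skipped.
```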
@@ -1,6 +1,6 @@
 {% for cilium_bgp_advertisement in cilium_bgp_advertisements %}
 ---
-apiVersion: "cilium.io/v2alpha1"
+apiVersion: "cilium.io/v2"
 kind: CiliumBGPAdvertisement
 metadata:
   name: "{{ cilium_bgp_advertisement.name }}"
@@ -1,6 +1,6 @@
 {% for cilium_bgp_cluster_config in cilium_bgp_cluster_configs %}
 ---
-apiVersion: "cilium.io/v2alpha1"
+apiVersion: "cilium.io/v2"
 kind: CiliumBGPClusterConfig
 metadata:
   name: "{{ cilium_bgp_cluster_config.name }}"
@@ -1,6 +1,6 @@
 {% for cilium_bgp_node_config_override in cilium_bgp_node_config_overrides %}
 ---
-apiVersion: "cilium.io/v2alpha1"
+apiVersion: "cilium.io/v2"
 kind: CiliumBGPNodeConfigOverride
 metadata:
   name: "{{ cilium_bgp_node_config_override.name }}"
@@ -1,6 +1,6 @@
 {% for cilium_bgp_peer_config in cilium_bgp_peer_configs %}
 ---
-apiVersion: "cilium.io/v2alpha1"
+apiVersion: "cilium.io/v2"
 kind: CiliumBGPPeerConfig
 metadata:
   name: "{{ cilium_bgp_peer_config.name }}"
@@ -1,6 +1,6 @@
 {% for cilium_loadbalancer_ip_pool in cilium_loadbalancer_ip_pools %}
 ---
-apiVersion: "cilium.io/v2alpha1"
+apiVersion: "cilium.io/v2"
 kind: CiliumLoadBalancerIPPool
 metadata:
   name: "{{ cilium_loadbalancer_ip_pool.name }}"
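The template iterates over `cilium_loadbalancer_ip_pools` and uses each entry's `name`. A minimal inventory sketch; only `name` appears in this diff, the `cidrs` key is an assumed example of the rest of the pool spec:

```yaml
cilium_loadbalancer_ip_pools:
  - name: "blue-pool"
    cidrs:               # assumed field, not shown in this diff
      - "10.0.10.0/24"
```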
@@ -62,8 +62,8 @@ cni:

 autoDirectNodeRoutes: {{ cilium_auto_direct_node_routes | to_json }}

-ipv4NativeRoutingCIDR: {{ cilium_native_routing_cidr }}
-ipv6NativeRoutingCIDR: {{ cilium_native_routing_cidr_ipv6 }}
+ipv4NativeRoutingCIDR: "{{ cilium_native_routing_cidr }}"
+ipv6NativeRoutingCIDR: "{{ cilium_native_routing_cidr_ipv6 }}"

 encryption:
   enabled: {{ cilium_encryption_enabled | to_json }}
@@ -143,6 +143,14 @@ cgroup:
   enabled: {{ cilium_cgroup_auto_mount | to_json }}
   hostRoot: {{ cilium_cgroup_host_root }}

+resources:
+  limits:
+    memory: "{{ cilium_memory_limit }}"
+    cpu: "{{ cilium_cpu_limit }}"
+  requests:
+    memory: "{{ cilium_memory_requests }}"
+    cpu: "{{ cilium_cpu_requests }}"
+
 operator:
   image:
     repository: {{ cilium_operator_image_repo }}
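The new `resources` block is filled from four inventory variables, so the agent's requests and limits can be tuned per cluster. A sketch with illustrative values; the variable names come from the template above, the values are examples rather than recommendations:

```yaml
cilium_memory_limit: 1Gi
cilium_cpu_limit: "1"
cilium_memory_requests: 256Mi
cilium_cpu_requests: 200m
```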
@@ -17,6 +17,8 @@
       --grace-period {{ drain_grace_period }}
       --timeout {{ drain_timeout }}
       --delete-emptydir-data {{ kube_override_hostname }}
+  async: "{{ (drain_timeout | regex_replace('s$', '') | int) + 120 }}"
+  poll: 15
   when:
     - groups['kube_control_plane'] | length > 0
   # ignore servers that are not nodes
@@ -59,6 +59,8 @@
       --timeout {{ drain_timeout }}
       --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
       {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
+  async: "{{ (drain_timeout | regex_replace('s$', '') | int) + 120 }}"
+  poll: 15
   when: drain_nodes
   register: result
   failed_when:
@@ -82,6 +84,8 @@
       --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
       {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
       --disable-eviction
+  async: "{{ (drain_fallback_timeout | regex_replace('s$', '') | int) + 120 }}"
+  poll: 15
   register: drain_fallback_result
   until: drain_fallback_result.rc == 0
   retries: "{{ drain_fallback_retries }}"
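The `async` value is derived from the drain timeout so the Ansible-side limit always exceeds kubectl's own timeout: the trailing `s` is stripped, the result is cast to an integer, and 120 seconds of headroom are added, with the task polled every 15 seconds. A worked example; the `300s` timeout is an assumed value:

```yaml
drain_timeout: "300s"
# "300s" | regex_replace('s$', '')  -> "300"
# "300" | int                       -> 300
# 300 + 120                         -> 420   (seconds the async task may run)
```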
@@ -3,8 +3,11 @@
 cloud_image: openeuler-2403
 vm_memory: 3072

-# Openeuler package mgmt is slow for some reason
-pkg_install_timeout: "{{ 10 * 60 }}"
+# Use metalink for faster package downloads (auto-selects closest mirror)
+openeuler_metalink_enabled: true
+
+# CI package installation takes ~7min; default 5min is too tight, use 15min for margin
+pkg_install_timeout: "{{ 15 * 60 }}"

 # Work around so the Kubernetes 1.35 tests can pass. We will discuss the openeuler support later.
 kubeadm_ignore_preflight_errors:
@@ -13,3 +13,21 @@ kube_owner: root
 # Node Feature Discovery
 node_feature_discovery_enabled: true
 kube_asymmetric_encryption_algorithm: "ECDSA-P256"
+
+# Testing no_proxy setup
+# The proxy is not intended to be accessed at all, we're only testing
+# the no_proxy construction
+https_proxy: "http://some-proxy.invalid"
+http_proxy: "http://some-proxy.invalid"
+additional_no_proxy_list:
+  - github.com
+  - githubusercontent.com
+  - k8s.io
+  - rockylinux.org
+  - docker.io
+  - googleapis.com
+  - quay.io
+  - pkg.dev
+  - amazonaws.com
+  - cilium.io
+skip_http_proxy_on_os_packages: true