Compare commits

..

9 Commits

Author SHA1 Message Date
github-actions[bot]
1648b754f6 Patch versions updates 2026-03-18 03:22:52 +00:00
Ali Afsharzadeh
e979e770f2 Fix calico api server permissions (#13101)
Signed-off-by: Ali Afsharzadeh <afsharzadeh8@gmail.com>
2026-03-17 16:19:50 +05:30
Ali Afsharzadeh
b1e3816b2f Add calico-tier-getter RBAC (#13100)
Signed-off-by: Ali Afsharzadeh <afsharzadeh8@gmail.com>
2026-03-17 16:19:42 +05:30
Vitaly
391b08c645 fix: use nodelocaldns_ip with ipv6 address (#13087) 2026-03-17 16:07:38 +05:30
NoNE
39b97464be Use async/poll on drain tasks to prevent SSH connection timeouts (#13081) 2026-03-17 14:31:36 +05:30
Kay Yan
3c6d368397 ci(openeuler): improve mirror selection and stabilize CI checks (#13094)
Enable openEuler metalink and clear dnf cache after repo updates so package downloads use refreshed mirror metadata. Keep openeuler24-calico in the main CI matrix with a longer package timeout, and clean up failed pods before running-state checks to reduce transient CI noise.


Made-with: Cursor

Made-with: Cursor

Made-with: Cursor

Made-with: Cursor

Signed-off-by: Kay Yan <kay.yan@daocloud.io>
2026-03-17 13:29:37 +05:30
Max Gautier
03d17fea92 proxy: Fix the no_proxy variable (#12981)
* CI: add no_proxy regression test

* proxy: Fix the no_proxy variable

Since 2.29, probably due to a change in ansible templating, the no_proxy
variable is rendered as an array of character rather than a string.

This results in a broken cluster in some cases.

Eliminate the custom jinja looping to use filters and list flattening +
join instead.
Also simplify some things (no separate tasks file, just use `run_once`
instead of delegating to localhost)
2026-03-17 03:45:37 +05:30
Cheprasov Daniil
dbb8527560 docs(etcd): clarify etcd metrics scraping with listen-metrics-urls (#13059) 2026-03-16 14:37:39 +05:30
Shaleen Bathla
7acdc4df64 cilium: honor resource limits and requests by default (#13092)
Signed-off-by: Shaleen Bathla <shaleen.bathla@servicenow.com>
2026-03-16 08:49:40 +05:30
18 changed files with 205 additions and 88 deletions

View File

@@ -57,6 +57,7 @@ pr:
- ubuntu24-kube-router-svc-proxy
- ubuntu24-ha-separate-etcd
- fedora40-flannel-crio-collection-scale
- openeuler24-calico
# This is for flaky tests so they don't disrupt the PR workflow too much.
# Jobs here MUST have a open issue so we don't lose sight of them
@@ -67,7 +68,6 @@ pr-flakey:
matrix:
- TESTCASE:
- flatcar4081-calico # https://github.com/kubernetes-sigs/kubespray/issues/12309
- openeuler24-calico # https://github.com/kubernetes-sigs/kubespray/issues/12877
# The ubuntu24-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
ubuntu24-calico-all-in-one:

View File

@@ -32,12 +32,12 @@ etcd_metrics_service_labels:
k8s-app: etcd
app.kubernetes.io/managed-by: Kubespray
app: kube-prometheus-stack-kube-etcd
release: prometheus-stack
release: kube-prometheus-stack
```
The last two labels in the above example allow scraping the metrics from the
[kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
chart with the following Helm `values.yaml` :
chart when it is installed with the release name `kube-prometheus-stack` and the following Helm `values.yaml`:
```yaml
kubeEtcd:
@@ -45,8 +45,22 @@ kubeEtcd:
enabled: false
```
To fully override metrics exposition urls, define it in the inventory with:
If your Helm release name is different, adjust the `release` label accordingly.
To fully override metrics exposition URLs, define it in the inventory with:
```yaml
etcd_listen_metrics_urls: "http://0.0.0.0:2381"
```
If you choose to expose metrics on specific node IPs (for example `10.141.4.22`, `10.141.4.23`, `10.141.4.24`) in `etcd_listen_metrics_urls`,
you can configure kube-prometheus-stack to scrape those endpoints directly with:
```yaml
kubeEtcd:
enabled: true
endpoints:
- 10.141.4.22
- 10.141.4.23
- 10.141.4.24
```

View File

@@ -16,6 +16,8 @@
- name: Gather and compute network facts
import_role:
name: network_facts
tags:
- always
- name: Gather minimal facts
setup:
gather_subset: '!all'

View File

@@ -12,6 +12,10 @@ coreos_locksmithd_disable: false
# Install epel repo on Centos/RHEL
epel_enabled: false
## openEuler specific variables
# Enable metalink for openEuler repos (auto-selects fastest mirror by location)
openeuler_metalink_enabled: false
## Oracle Linux specific variables
# Install public repo on Oracle Linux
use_oracle_public_repo: true

View File

@@ -1,3 +1,43 @@
---
- name: Import Centos boostrap for openEuler
import_tasks: centos.yml
- name: Import CentOS bootstrap for openEuler
ansible.builtin.import_tasks: centos.yml
- name: Get existing openEuler repo sections
ansible.builtin.shell:
cmd: "set -o pipefail && grep '^\\[' /etc/yum.repos.d/openEuler.repo | tr -d '[]'"
executable: /bin/bash
register: _openeuler_repo_sections
changed_when: false
failed_when: false
check_mode: false
become: true
when: openeuler_metalink_enabled
- name: Enable metalink for openEuler repos
community.general.ini_file:
path: /etc/yum.repos.d/openEuler.repo
section: "{{ item.key }}"
option: metalink
value: "{{ item.value }}"
no_extra_spaces: true
mode: "0644"
loop: "{{ _openeuler_metalink_repos | dict2items | selectattr('key', 'in', _openeuler_repo_sections.stdout_lines | default([])) }}"
become: true
when: openeuler_metalink_enabled
register: _openeuler_metalink_result
vars:
_openeuler_metalink_repos:
OS: "https://mirrors.openeuler.org/metalink?repo=$releasever/OS&arch=$basearch"
everything: "https://mirrors.openeuler.org/metalink?repo=$releasever/everything&arch=$basearch"
EPOL: "https://mirrors.openeuler.org/metalink?repo=$releasever/EPOL/main&arch=$basearch"
debuginfo: "https://mirrors.openeuler.org/metalink?repo=$releasever/debuginfo&arch=$basearch"
source: "https://mirrors.openeuler.org/metalink?repo=$releasever&arch=source"
update: "https://mirrors.openeuler.org/metalink?repo=$releasever/update&arch=$basearch"
update-source: "https://mirrors.openeuler.org/metalink?repo=$releasever/update&arch=source"
- name: Clean dnf cache to apply metalink mirror selection
ansible.builtin.command: dnf clean all
become: true
when:
- openeuler_metalink_enabled
- _openeuler_metalink_result.changed

View File

@@ -45,7 +45,7 @@ data:
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_health_port }}
health {{ nodelocaldns_ip | ansible.utils.ipwrap }}:{{ nodelocaldns_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough
@@ -132,7 +132,7 @@ data:
force_tcp
}
prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }}
health {{ nodelocaldns_ip }}:{{ nodelocaldns_second_health_port }}
health {{ nodelocaldns_ip | ansible.utils.ipwrap }}:{{ nodelocaldns_second_health_port }}
{% if dns_etchosts | default(None) %}
hosts /etc/coredns/hosts {
fallthrough

View File

@@ -662,6 +662,7 @@ cri_dockerd_archive_checksums:
0.3.5: sha256:30d47bd89998526d51a8518f9e8ef10baed408ab273879ee0e30350702092938
runc_checksums:
arm64:
1.3.5: sha256:bd843d75a788e612c9df286b1fa519a44fcbb7a7b8d01e2268431433cc7c718c
1.3.4: sha256:d6dcab36d1b6af1b72c7f0662e5fcf446a291271ba6006532b95c4144e19d428
1.3.3: sha256:3c9a8e9e6dafd00db61f4611692447ebab4a56388bae4f82192aed67b66df712
1.3.2: sha256:06fbccb4528ecd490f3f333d6dcf22c876bd72a024813a0c0a46312121f4c5fd
@@ -686,6 +687,7 @@ runc_checksums:
1.1.9: sha256:b43e9f561e85906f469eef5a7b7992fc586f750f44a0e011da4467e7008c33a0
1.1.8: sha256:7c22cb618116d1d5216d79e076349f93a672253d564b19928a099c20e4acd658
amd64:
1.3.5: sha256:66fa8390be8fb3b23dfbb60c767368bb5b51f1acfa88692bbff1a82953d4d9e9
1.3.4: sha256:5966ca40b6187b30e33bfc299c5f1fe72e8c1aa01cf3fefdadf391668f47f103
1.3.3: sha256:8781ab9f71c12f314d21c8e85f13ca1a82d90cf475aa5131a7b543fcc5487543
1.3.2: sha256:e7a8e30bd6d248f494aae9163521ff4eb112a30602ac56ada0871e3531269c2d
@@ -710,6 +712,7 @@ runc_checksums:
1.1.9: sha256:b9bfdd4cb27cddbb6172a442df165a80bfc0538a676fbca1a6a6c8f4c6933b43
1.1.8: sha256:1d05ed79854efc707841dfc7afbf3b86546fc1d0b3a204435ca921c14af8385b
ppc64le:
1.3.5: sha256:62e8f062291c2b2b29bd8ab8c983cef56409063287e256c50ab54fb54f5d98a7
1.3.4: sha256:268d9be1188f3efa82cad0d8e6b938d8da0d741427660d874ca9386c68d72937
1.3.3: sha256:c42394e7cf7cd508a91b090b72d57ff4df262effde742d5e29ea607e65f38b43
1.3.2: sha256:9373062bc547b5afe44fb0122a12aaa980763969d4b69dd17134a6a292838ce5

View File

@@ -0,0 +1,5 @@
---
# Additional string host to inject into NO_PROXY
additional_no_proxy: ""
additional_no_proxy_list: "{{ additional_no_proxy | split(',') }}"
no_proxy_exclude_workers: false

View File

@@ -1,41 +1,63 @@
---
- name: Set facts variables
tags:
- always
block:
- name: Gather node IPs
setup:
gather_subset: '!all,!min,network'
filter: "ansible_default_ip*"
when: ansible_default_ipv4 is not defined or ansible_default_ipv6 is not defined
ignore_unreachable: true
- name: Gather node IPs
setup:
gather_subset: '!all,!min,network'
filter: "ansible_default_ip*"
when: ansible_default_ipv4 is not defined or ansible_default_ipv6 is not defined
ignore_unreachable: true
- name: Set computed IPs varables
vars:
fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
# Set 127.0.0.1 as fallback IP if we do not have host facts for host
# ansible_default_ipv4 isn't what you think.
_ipv4: "{{ ip | default(fallback_ip) }}"
_access_ipv4: "{{ access_ip | default(_ipv4) }}"
_ipv6: "{{ ip6 | default(fallback_ip6) }}"
_access_ipv6: "{{ access_ip6 | default(_ipv6) }}"
_access_ips:
- "{{ _access_ipv4 if ipv4_stack }}"
- "{{ _access_ipv6 if ipv6_stack }}"
_ips:
- "{{ _ipv4 if ipv4_stack }}"
- "{{ _ipv6 if ipv6_stack }}"
set_fact:
cacheable: true
main_access_ip: "{{ _access_ipv4 if ipv4_stack else _access_ipv6 }}"
main_ip: "{{ _ipv4 if ipv4_stack else _ipv6 }}"
# Mixed IPs - for dualstack
main_access_ips: "{{ _access_ips | select }}"
main_ips: "{{ _ips | select }}"
- name: Set computed IPs variables
vars:
fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
# Set 127.0.0.1 as fallback IP if we do not have host facts for host
# ansible_default_ipv4 isn't what you think.
_ipv4: "{{ ip | default(fallback_ip) }}"
_access_ipv4: "{{ access_ip | default(_ipv4) }}"
_ipv6: "{{ ip6 | default(fallback_ip6) }}"
_access_ipv6: "{{ access_ip6 | default(_ipv6) }}"
_access_ips:
- "{{ _access_ipv4 if ipv4_stack }}"
- "{{ _access_ipv6 if ipv6_stack }}"
_ips:
- "{{ _ipv4 if ipv4_stack }}"
- "{{ _ipv6 if ipv6_stack }}"
set_fact:
cacheable: true
main_access_ip: "{{ _access_ipv4 if ipv4_stack else _access_ipv6 }}"
main_ip: "{{ _ipv4 if ipv4_stack else _ipv6 }}"
# Mixed IPs - for dualstack
main_access_ips: "{{ _access_ips | select }}"
main_ips: "{{ _ips | select }}"
- name: Set no_proxy
import_tasks: no_proxy.yml
when:
- http_proxy is defined or https_proxy is defined
- no_proxy is not defined
- name: Set no_proxy to all assigned cluster IPs and hostnames
when:
- http_proxy is defined or https_proxy is defined
- no_proxy is not defined
vars:
groups_with_no_proxy:
- kube_control_plane
- "{{ '' if no_proxy_exclude_workers else 'kube_node' }}" # TODO: exclude by a boolean in inventory rather than global variable
- etcd
- calico_rr
hosts_with_no_proxy: "{{ groups_with_no_proxy | select | map('extract', groups) | select('defined') | flatten }}"
_hostnames: "{{ (hosts_with_no_proxy +
(hosts_with_no_proxy | map('extract', hostvars, morekeys=['ansible_hostname'])
| select('defined')))
| unique }}"
no_proxy_prepare:
- "{{ apiserver_loadbalancer_domain_name | d('') }}"
- "{{ loadbalancer_apiserver.address if loadbalancer_apiserver is defined else '' }}"
- "{{ hosts_with_no_proxy | map('extract', hostvars, morekeys=['main_access_ip']) }}"
- "{{ _hostnames }}"
- "{{ _hostnames | map('regex_replace', '$', '.' + dns_domain ) }}"
- "{{ additional_no_proxy_list }}"
- 127.0.0.1
- localhost
- "{{ kube_service_subnets }}"
- "{{ kube_pods_subnets }}"
- svc
- "svc.{{ dns_domain }}"
set_fact:
no_proxy: "{{ no_proxy_prepare | select | flatten | unique | join(',') }}"
run_once: true

View File

@@ -1,40 +0,0 @@
---
- name: Set no_proxy to all assigned cluster IPs and hostnames
set_fact:
# noqa: jinja[spacing]
no_proxy_prepare: >-
{%- if loadbalancer_apiserver is defined -%}
{{ apiserver_loadbalancer_domain_name }},
{{ loadbalancer_apiserver.address | default('') }},
{%- endif -%}
{%- if no_proxy_exclude_workers | default(false) -%}
{% set cluster_or_control_plane = 'kube_control_plane' %}
{%- else -%}
{% set cluster_or_control_plane = 'k8s_cluster' %}
{%- endif -%}
{%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{{ hostvars[item]['main_access_ip'] }},
{%- if item != hostvars[item].get('ansible_hostname', '') -%}
{{ hostvars[item]['ansible_hostname'] }},
{{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
{%- endif -%}
{{ item }},{{ item }}.{{ dns_domain }},
{%- endfor -%}
{%- if additional_no_proxy is defined -%}
{{ additional_no_proxy }},
{%- endif -%}
127.0.0.1,localhost,{{ kube_service_subnets }},{{ kube_pods_subnets }},svc,svc.{{ dns_domain }}
delegate_to: localhost
connection: local
delegate_facts: true
become: false
run_once: true
- name: Populates no_proxy to all hosts
set_fact:
no_proxy: "{{ hostvars.localhost.no_proxy_prepare | select }}"
# noqa: jinja[spacing]
proxy_env: "{{ proxy_env | combine({
'no_proxy': hostvars.localhost.no_proxy_prepare,
'NO_PROXY': hostvars.localhost.no_proxy_prepare
}) }}"

View File

@@ -177,6 +177,9 @@ rules:
- blockaffinities
- caliconodestatuses
- tiers
- stagednetworkpolicies
- stagedglobalnetworkpolicies
- stagedkubernetesnetworkpolicies
verbs:
- get
- list

View File

@@ -215,3 +215,17 @@ rules:
- calico-cni-plugin
verbs:
- create
{% if calico_version is version('3.29.0', '>=') %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-tier-getter
rules:
- apiGroups:
- "projectcalico.org"
resources:
- "tiers"
verbs:
- "get"
{% endif %}

View File

@@ -26,3 +26,18 @@ subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
{% if calico_version is version('3.29.0', '>=') %}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-tier-getter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-tier-getter
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:kube-controller-manager
{% endif %}

View File

@@ -143,6 +143,14 @@ cgroup:
enabled: {{ cilium_cgroup_auto_mount | to_json }}
hostRoot: {{ cilium_cgroup_host_root }}
resources:
limits:
memory: "{{ cilium_memory_limit }}"
cpu: "{{ cilium_cpu_limit }}"
requests:
memory: "{{ cilium_memory_requests }}"
cpu: "{{ cilium_cpu_requests }}"
operator:
image:
repository: {{ cilium_operator_image_repo }}

View File

@@ -17,6 +17,8 @@
--grace-period {{ drain_grace_period }}
--timeout {{ drain_timeout }}
--delete-emptydir-data {{ kube_override_hostname }}
async: "{{ (drain_timeout | regex_replace('s$', '') | int) + 120 }}"
poll: 15
when:
- groups['kube_control_plane'] | length > 0
# ignore servers that are not nodes

View File

@@ -59,6 +59,8 @@
--timeout {{ drain_timeout }}
--delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
{% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
async: "{{ (drain_timeout | regex_replace('s$', '') | int) + 120 }}"
poll: 15
when: drain_nodes
register: result
failed_when:
@@ -82,6 +84,8 @@
--delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
{% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
--disable-eviction
async: "{{ (drain_fallback_timeout | regex_replace('s$', '') | int) + 120 }}"
poll: 15
register: drain_fallback_result
until: drain_fallback_result.rc == 0
retries: "{{ drain_fallback_retries }}"

View File

@@ -3,8 +3,11 @@
cloud_image: openeuler-2403
vm_memory: 3072
# Openeuler package mgmt is slow for some reason
pkg_install_timeout: "{{ 10 * 60 }}"
# Use metalink for faster package downloads (auto-selects closest mirror)
openeuler_metalink_enabled: true
# CI package installation takes ~7min; default 5min is too tight, use 15min for margin
pkg_install_timeout: "{{ 15 * 60 }}"
# Work around so the Kubernetes 1.35 tests can pass. We will discuss the openeuler support later.
kubeadm_ignore_preflight_errors:

View File

@@ -13,3 +13,21 @@ kube_owner: root
# Node Feature Discovery
node_feature_discovery_enabled: true
kube_asymmetric_encryption_algorithm: "ECDSA-P256"
# Testing no_proxy setup
# The proxy is not intended to be accessed at all, we're only testing
# the no_proxy construction
https_proxy: "http://some-proxy.invalid"
http_proxy: "http://some-proxy.invalid"
additional_no_proxy_list:
- github.com
- githubusercontent.com
- k8s.io
- rockylinux.org
- docker.io
- googleapis.com
- quay.io
- pkg.dev
- amazonaws.com
- cilium.io
skip_http_proxy_on_os_packages: true