Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 05:45:06 +03:00)
Fold kubernetes-apps/network_plugin into network_plugin (#12506)
From what I can see, there is no reason for the split, and it makes things confusing.
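In practice, the fold means a cluster playbook only needs the single network_plugin role entry, under the same network tag, to both template and apply the CNI manifests. A minimal sketch of such a play (hypothetical and condensed, not a verbatim quote of any playbook in this change):

- name: Deploy the CNI plugin (hypothetical example play)
  hosts: k8s_cluster
  roles:
    - { role: kubespray_defaults }
    - { role: network_plugin, tags: network }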
@@ -81,7 +81,6 @@
   roles:
     - { role: kubespray_defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
-    - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
@@ -73,7 +73,6 @@
     - { role: kubespray_defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
     - { role: network_plugin, tags: network }
-    - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
 - name: Finally handle worker upgrades, based on given batch size
@@ -1,2 +0,0 @@
----
-# TODO: Handle Calico etcd -> kdd migration
@@ -1,17 +0,0 @@
----
-- name: Flannel | Start Resources
-  kube:
-    name: "{{ item.item.name }}"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item.item.type }}"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  with_items: "{{ flannel_node_manifests.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
-
-- name: Flannel | Wait for flannel subnet.env file presence
-  wait_for:
-    path: /run/flannel/subnet.env
-    delay: 5
-    timeout: 600
@@ -1,9 +0,0 @@
----
-- name: Kube-OVN | Start Resources
-  kube:
-    name: "{{ item.item.name }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  with_items: "{{ kube_ovn_node_manifests.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
@@ -1,23 +0,0 @@
----
-
-- name: Kube-router | Start Resources
-  kube:
-    name: "kube-router"
-    kubectl: "{{ bin_dir }}/kubectl"
-    filename: "{{ kube_config_dir }}/kube-router.yml"
-    resource: "ds"
-    namespace: "kube-system"
-    state: "latest"
-  delegate_to: "{{ groups['kube_control_plane'] | first }}"
-  run_once: true
-
-- name: Kube-router | Wait for kube-router pods to be ready
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa ignore-errors
-  register: pods_not_ready
-  until: pods_not_ready.stdout.find("kube-router")==-1
-  retries: 30
-  delay: 10
-  ignore_errors: true
-  delegate_to: "{{ groups['kube_control_plane'] | first }}"
-  run_once: true
-  changed_when: false
@@ -1,26 +0,0 @@
----
-dependencies:
-  - role: kubernetes-apps/network_plugin/calico
-    when: kube_network_plugin == 'calico'
-    tags:
-      - calico
-
-  - role: kubernetes-apps/network_plugin/flannel
-    when: kube_network_plugin == 'flannel'
-    tags:
-      - flannel
-
-  - role: kubernetes-apps/network_plugin/kube-ovn
-    when: kube_network_plugin == 'kube-ovn'
-    tags:
-      - kube-ovn
-
-  - role: kubernetes-apps/network_plugin/kube-router
-    when: kube_network_plugin == 'kube-router'
-    tags:
-      - kube-router
-
-  - role: kubernetes-apps/network_plugin/multus
-    when: kube_network_plugin_multus
-    tags:
-      - multus
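The deleted meta/main.yml above was essentially all the wrapper role did: pull in one per-plugin sub-role through conditional dependencies. The surviving network_plugin role selects its sub-roles the same way, which is what makes the wrapper redundant. A hedged sketch of that pattern, assumed from the role layout rather than shown in this diff:

---
dependencies:
  - role: network_plugin/calico
    when: kube_network_plugin == 'calico'
    tags:
      - calico

  - role: network_plugin/flannel
    when: kube_network_plugin == 'flannel'
    tags:
      - flannel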
@@ -1,18 +0,0 @@
----
-- name: Multus | Start resources
-  kube:
-    name: "{{ item.item.name }}"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item.item.type }}"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  run_once: true
-  with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}"
-  loop_control:
-    label: "{{ item.item.name if item != None else 'skipped' }}"
-  vars:
-    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
-  when:
-    - not item is skipped
@@ -19,3 +19,20 @@
   register: flannel_node_manifests
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
+
+- name: Flannel | Start Resources
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ flannel_node_manifests.results }}"
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
+
+- name: Flannel | Wait for flannel subnet.env file presence
+  wait_for:
+    path: /run/flannel/subnet.env
+    delay: 5
+    timeout: 600
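The item.item.* references in the added Start Resources task come from Ansible's loop results: registering a looped task wraps each original loop item under .item in every entry of .results. A hedged sketch of the kind of task that registers flannel_node_manifests earlier in this file (file names are illustrative, not quoted from the repo):

- name: Flannel | Create Flannel manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: "0644"
  with_items:
    - {name: kube-flannel, file: cni-flannel.yml, type: ds}
  register: flannel_node_manifests
  # Each entry of flannel_node_manifests.results keeps the original loop item
  # under .item, so the apply task reads item.item.name / .type / .file.
  when:
    - inventory_hostname == groups['kube_control_plane'][0]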
@@ -15,3 +15,12 @@
     - {name: ovn, file: cni-ovn.yml}
     - {name: kube-ovn, file: cni-kube-ovn.yml}
   register: kube_ovn_node_manifests
+
+- name: Kube-OVN | Start Resources
+  kube:
+    name: "{{ item.item.name }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ kube_ovn_node_manifests.results }}"
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
@@ -60,3 +60,25 @@
     mode: "0644"
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
+
+- name: Kube-router | Start Resources
+  kube:
+    name: "kube-router"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/kube-router.yml"
+    resource: "ds"
+    namespace: "kube-system"
+    state: "latest"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
+  run_once: true
+
+- name: Kube-router | Wait for kube-router pods to be ready
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa ignore-errors
+  register: pods_not_ready
+  until: pods_not_ready.stdout.find("kube-router")==-1
+  retries: 30
+  delay: 10
+  ignore_errors: true
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
+  run_once: true
+  changed_when: false
@@ -34,3 +34,21 @@
     - item.engine in container_manager_types
     - hostvars[inventory_hostname].container_manager == item.engine
     - inventory_hostname == vars_from_node
+
+- name: Multus | Start resources
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  run_once: true
+  with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}"
+  loop_control:
+    label: "{{ item.item.name if item != None else 'skipped' }}"
+  vars:
+    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
+  when:
+    - not item is skipped