diff --git a/.gitignore b/.gitignore
index e50e78e22..3f7924496 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,9 +12,9 @@ temp
*.tfstate
*.tfstate.backup
contrib/terraform/aws/credentials.tfvars
-**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
+*~
vagrant/
# Byte-compiled / optimized / DLL files
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3e31ce1bd..5ba68ab05 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -93,7 +93,7 @@ before_script:
# Check out latest tag if testing upgrade
# Uncomment when gitlab kubespray repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- - test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
+ - test "${UPGRADE_TEST}" != "false" && git checkout 8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
diff --git a/OWNERS b/OWNERS
index 6ecbee5c9..1883a4930 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,9 +1,7 @@
# See the OWNERS file documentation:
-# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md
+# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
-owners:
- - Smana
- - ant31
- - bogdando
- - mattymo
- - rsmitty
+approvers:
+ - kubespray-approvers
+reviewers:
+ - kubespray-reviewers
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
new file mode 100644
index 000000000..bbb73ece4
--- /dev/null
+++ b/OWNERS_ALIASES
@@ -0,0 +1,17 @@
+aliases:
+ kubespray-approvers:
+ - ant31
+ - mattymo
+ - atoms
+ - chadswen
+ - rsmitty
+ - bogdando
+ - bradbeam
+ - woopstar
+ - riverzhang
+ - holser
+ - smana
+ kubespray-reviewers:
+ - jjungnickel
+ - archifleks
+ - chapsuk
diff --git a/README.md b/README.md
index 37a8610d0..aad80b7df 100644
--- a/README.md
+++ b/README.md
@@ -5,11 +5,11 @@ Deploy a Production Ready Kubernetes Cluster
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
-- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
-- **High available** cluster
-- **Composable** (Choice of the network plugin for instance)
-- Support most popular **Linux distributions**
-- **Continuous integration tests**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
+- **Highly available** cluster
+- **Composable** (Choice of the network plugin for instance)
+- Supports most popular **Linux distributions**
+- **Continuous integration tests**
Quick Start
-----------
@@ -17,6 +17,7 @@ Quick Start
To deploy the cluster you can use:
### Ansible
+
# Install dependencies from ``requirements.txt``
sudo pip install -r requirements.txt
@@ -36,19 +37,16 @@ To deploy the cluster you can use :
### Vagrant
- For Vagrant we need to install python dependencies for provisioning tasks.\
- Check if Python and pip are installed:
-```sh
-python -v && pip -v
-```
-
- If this returns the version of the software, you're good to go. If not, download and install Python from here https://www.python.org/downloads/source/
- Install the necessary requirements
-
-```sh
-sudo pip install -r requirements.txt
-vagrant up
-```
+For Vagrant we need to install Python dependencies for provisioning tasks.
+Check if Python and pip are installed:
+
+ python -V && pip -V
+
+If this returns the version of the software, you're good to go. If not, download and install Python from https://www.python.org/downloads/source/
+Install the necessary requirements:
+
+ sudo pip install -r requirements.txt
+ vagrant up
Documents
---------
@@ -88,19 +86,25 @@ Supported Linux Distributions
Note: Upstart/SysV init based OS types are not supported.
-Versions of supported components
---------------------------------
+Supported Components
+--------------------
-- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.10.2
-- [etcd](https://github.com/coreos/etcd/releases) v3.2.16
-- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
-- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
-- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
-- [contiv](https://github.com/contiv/install/releases) v1.1.7
-- [weave](http://weave.works/) v2.3.0
-- [docker](https://www.docker.com/) v17.03 (see note)
-- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
+- Core
+ - [kubernetes](https://github.com/kubernetes/kubernetes) v1.11.2
+ - [etcd](https://github.com/coreos/etcd) v3.2.18
+ - [docker](https://www.docker.com/) v17.03 (see note)
+ - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
+- Network Plugin
+ - [calico](https://github.com/projectcalico/calico) v2.6.8
+ - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+ - [cilium](https://github.com/cilium/cilium) v1.1.2
+ - [contiv](https://github.com/contiv/install) v1.1.7
+ - [flanneld](https://github.com/coreos/flannel) v0.10.0
+ - [weave](https://github.com/weaveworks/weave) v2.4.0
+- Application
+ - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v1.1.0-k8s1.10
+ - [cert-manager](https://github.com/jetstack/cert-manager) v0.4.1
+ - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.18.0
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
@@ -135,7 +139,7 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
-- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
+- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
diff --git a/Vagrantfile b/Vagrantfile
index d0b6b73d1..df650f1e8 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -44,6 +44,8 @@ $kube_node_instances_with_disks = false
$kube_node_instances_with_disks_size = "20G"
$kube_node_instances_with_disks_number = 2
+$playbook = "cluster.yml"
+
$local_release_dir = "/vagrant/temp"
host_vars = {}
@@ -157,7 +159,7 @@ Vagrant.configure("2") do |config|
# when all the machines are up and ready.
if i == $num_instances
config.vm.provision "ansible" do |ansible|
- ansible.playbook = "cluster.yml"
+ ansible.playbook = $playbook
if File.exist?(File.join(File.dirname($inventory), "hosts"))
ansible.inventory_path = $inventory
end
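
Making the playbook name a variable means it can be overridden without editing the Vagrantfile. A minimal sketch, assuming the Vagrantfile loads an optional `vagrant/config.rb` user config (as upstream Kubespray's does):

```sh
# hypothetical override: provision with scale.yml instead of cluster.yml
mkdir -p vagrant
echo '$playbook = "scale.yml"' > vagrant/config.rb
vagrant provision
```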
diff --git a/cluster.yml b/cluster.yml
index 9bfd2ff42..8462ea894 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -37,7 +37,7 @@
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
environment: "{{proxy_env}}"
- hosts: etcd:k8s-cluster:vault:calico-rr
@@ -51,7 +51,7 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: true }
+ - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
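
With `etcd_events_cluster_setup` now driven by `etcd_events_cluster_enabled`, the separate etcd events cluster becomes opt-in, and the download role honors `skip_downloads` at run time instead of hardcoding it. A minimal sketch of toggling both from the command line, using the variable names introduced above:

```sh
# enable the dedicated etcd events cluster and skip the download phase
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
  -e etcd_events_cluster_enabled=true \
  -e skip_downloads=true
```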
diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md
index c15d3ecf2..b83aeeb9b 100644
--- a/contrib/azurerm/README.md
+++ b/contrib/azurerm/README.md
@@ -9,8 +9,8 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a
## Requirements
-- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
-- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
+- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
+- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
- Dedicated Resource Group created in the Azure Portal or through azure-cli
## Configuration through group_vars/all
diff --git a/contrib/network-storage/glusterfs/group_vars b/contrib/network-storage/glusterfs/group_vars
index d64da8dc6..6a3f85e47 120000
--- a/contrib/network-storage/glusterfs/group_vars
+++ b/contrib/network-storage/glusterfs/group_vars
@@ -1 +1 @@
-../../../inventory/group_vars
\ No newline at end of file
+../../../inventory/local/group_vars
\ No newline at end of file
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
index 5ca493867..b9f0d2d1d 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
@@ -2,7 +2,7 @@
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "4.1"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
index 1c8763388..ef9a71eba 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
@@ -2,7 +2,7 @@
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "3.12"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
index 13c595f74..e931068ae 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
@@ -1,2 +1,2 @@
---
-glusterfs_daemon: glusterfs-server
+glusterfs_daemon: glusterd
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index 2354deac0..709d0633f 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -17,10 +17,10 @@ This project will create:
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
```
-export AWS_ACCESS_KEY_ID="www"
-export AWS_SECRET_ACCESS_KEY ="xxx"
-export AWS_SSH_KEY_NAME="yyy"
-export AWS_DEFAULT_REGION="zzz"
+export TF_VAR_AWS_ACCESS_KEY_ID="www"
+export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
+export TF_VAR_AWS_SSH_KEY_NAME="yyy"
+export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
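
The `TF_VAR_` prefix is Terraform's standard mechanism for feeding environment variables into declared inputs: `TF_VAR_AWS_SSH_KEY_NAME` only takes effect if a matching `variable "AWS_SSH_KEY_NAME"` block exists in the configuration. A minimal sketch of the intended flow:

```sh
# Terraform maps TF_VAR_<name> onto a declared `variable "<name>"`
export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
terraform plan contrib/terraform/aws
```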
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index 9c0617d84..1ff584f0c 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -181,7 +181,7 @@ data "template_file" "inventory" {
resource "null_resource" "inventories" {
provisioner "local-exec" {
- command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
+ command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers {
diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars
index 99ea64eed..c5b1dbff1 100644
--- a/contrib/terraform/aws/terraform.tfvars
+++ b/contrib/terraform/aws/terraform.tfvars
@@ -31,3 +31,5 @@ default_tags = {
# Env = "devtest"
# Product = "kubernetes"
}
+
+inventory_file = "../../../inventory/hosts"
diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf
index 58dd31388..37aab2bae 100644
--- a/contrib/terraform/aws/variables.tf
+++ b/contrib/terraform/aws/variables.tf
@@ -103,3 +103,7 @@ variable "default_tags" {
description = "Default tags for all resources"
type = "map"
}
+
+variable "inventory_file" {
+ description = "Where to store the generated inventory file"
+}
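
Since `inventory_file` is an ordinary Terraform input with its default supplied via `terraform.tfvars`, it can also be overridden per run; a minimal sketch:

```sh
# write the generated inventory somewhere else for this run
terraform apply -var-file=credentials.tfvars \
  -var 'inventory_file=../../../inventory/mycluster/hosts'
```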
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index de717fb69..15b101fe1 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -32,7 +32,11 @@ floating IP addresses or not.
- Kubernetes worker nodes
Note that the Ansible script will report an invalid configuration if you wind up
-with an even number of etcd instances since that is not a valid configuration.
+with an even number of etcd instances since that is not a valid configuration. This
+restriction includes standalone etcd nodes that are deployed in a cluster along with
+master nodes with etcd replicas. As an example, if you have three master nodes with
+etcd replicas and three standalone etcd nodes, the script will fail since there are
+now six total etcd replicas.
### GlusterFS
The Terraform configuration supports provisioning of an optional GlusterFS
@@ -219,6 +223,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
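
As with `supplementary_master_groups`, the new variable is a comma-separated string appended to `kubespray_groups`, so the hosts land in the extra Ansible group(s). A minimal sketch of setting it at apply time:

```sh
# tag all worker nodes with the extra kube-ingress ansible group
terraform apply -var 'supplementary_node_groups=kube-ingress'
```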
#### Terraform state files
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index c501302de..8e5d05adf 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -3,6 +3,7 @@ module "network" {
external_net = "${var.external_net}"
network_name = "${var.network_name}"
+ subnet_cidr = "${var.subnet_cidr}"
cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}"
}
@@ -24,6 +25,7 @@ module "compute" {
source = "modules/compute"
cluster_name = "${var.cluster_name}"
+ az_list = "${var.az_list}"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_etcd = "${var.number_of_etcd}"
@@ -49,6 +51,7 @@ module "compute" {
k8s_node_fips = "${module.ips.k8s_node_fips}"
bastion_fips = "${module.ips.bastion_fips}"
supplementary_master_groups = "${var.supplementary_master_groups}"
+ supplementary_node_groups = "${var.supplementary_node_groups}"
network_id = "${module.network.router_id}"
}
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index 940049aa9..05026ed0b 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -59,6 +59,17 @@ resource "openstack_compute_secgroup_v2" "k8s" {
self = true
}
}
+resource "openstack_compute_secgroup_v2" "worker" {
+ name = "${var.cluster_name}-k8s-worker"
+ description = "${var.cluster_name} - Kubernetes worker nodes"
+
+ rule {
+ ip_protocol = "tcp"
+ from_port = "30000"
+ to_port = "32767"
+ cidr = "0.0.0.0/0"
+ }
+}
resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index+1}"
@@ -91,6 +102,7 @@ resource "openstack_compute_instance_v2" "bastion" {
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.number_of_k8s_masters}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -120,6 +132,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.number_of_k8s_masters_no_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -148,6 +161,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.number_of_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -169,6 +183,7 @@ resource "openstack_compute_instance_v2" "etcd" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -193,6 +208,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -216,6 +232,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -226,12 +243,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
+ "${openstack_compute_secgroup_v2.worker.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-node,k8s-cluster"
+ kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
}
@@ -244,6 +262,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -253,12 +272,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
+ "${openstack_compute_secgroup_v2.worker.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-node,k8s-cluster,no-floating"
+ kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
}
@@ -292,6 +312,7 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index 58ab17067..50a6e496c 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -1,5 +1,9 @@
variable "cluster_name" {}
+variable "az_list" {
+ type = "list"
+}
+
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
@@ -59,3 +63,7 @@ variable "bastion_fips" {
variable "supplementary_master_groups" {
default = ""
}
+
+variable "supplementary_node_groups" {
+ default = ""
+}
diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf
index 2c461c784..7c02869d4 100644
--- a/contrib/terraform/openstack/modules/network/main.tf
+++ b/contrib/terraform/openstack/modules/network/main.tf
@@ -12,7 +12,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
network_id = "${openstack_networking_network_v2.k8s.id}"
- cidr = "10.0.0.0/24"
+ cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
}
diff --git a/contrib/terraform/openstack/modules/network/variables.tf b/contrib/terraform/openstack/modules/network/variables.tf
index a7952bced..6494358aa 100644
--- a/contrib/terraform/openstack/modules/network/variables.tf
+++ b/contrib/terraform/openstack/modules/network/variables.tf
@@ -7,3 +7,5 @@ variable "cluster_name" {}
variable "dns_nameservers" {
type = "list"
}
+
+variable "subnet_cidr" {}
diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tf b/contrib/terraform/openstack/sample-inventory/cluster.tf
index 7830d2159..a793bfaa5 100644
--- a/contrib/terraform/openstack/sample-inventory/cluster.tf
+++ b/contrib/terraform/openstack/sample-inventory/cluster.tf
@@ -41,5 +41,6 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = ""
external_net = ""
+subnet_cidr = ""
floatingip_pool = ""
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index d49746c92..dc4ddae90 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -2,6 +2,12 @@ variable "cluster_name" {
default = "example"
}
+variable "az_list" {
+ description = "List of Availability Zones available in your OpenStack cluster"
+ type = "list"
+ default = ["nova"]
+}
+
variable "number_of_bastions" {
default = 1
}
@@ -97,6 +103,12 @@ variable "network_name" {
default = "internal"
}
+variable "subnet_cidr" {
+ description = "Subnet CIDR block."
+ type = "string"
+ default = "10.0.0.0/24"
+}
+
variable "dns_nameservers" {
description = "An array of DNS name server names used by hosts in this subnet."
type = "list"
@@ -116,3 +128,8 @@ variable "supplementary_master_groups" {
description = "supplementary kubespray ansible groups for masters, such kube-node"
default = ""
}
+
+variable "supplementary_node_groups" {
+ description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+ default = ""
+}
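
Both new inputs ship with safe defaults (`["nova"]` and `10.0.0.0/24`), and instances are spread over `az_list` round-robin via `element(var.az_list, count.index)` in the compute module. A minimal sketch of overriding them at apply time (`az-2` is a placeholder zone name):

```sh
# spread nodes across two availability zones and use a custom subnet
terraform apply \
  -var 'az_list=["nova","az-2"]' \
  -var 'subnet_cidr=10.1.0.0/24'
```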
diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py
index 955d5155b..6feaed42a 100755
--- a/contrib/terraform/terraform.py
+++ b/contrib/terraform/terraform.py
@@ -706,6 +706,10 @@ def query_list(hosts):
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
+ # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
+ # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
+            if not group:
+                group = "all"
+
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
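
`terraform.py` is a standard Ansible dynamic inventory script, so the fix can be exercised directly; a minimal sketch, assuming it is run from a directory holding Terraform state that previously yielded an empty group name:

```sh
# hosts that used to produce an empty group now appear under "all"
./contrib/terraform/terraform.py --list
```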
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 1deb88776..92689eee5 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.
-#### kubedns
+#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
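
Since `kubedns` is now the default, the dnsmasq DaemonSet mode has to be requested explicitly; a minimal sketch using the `dns_mode` variable described above:

```sh
# opt back into the dnsmasq_kubedns mode
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
  -e dns_mode=dnsmasq_kubedns
```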
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 2402ac54f..ff21856c3 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -38,9 +38,9 @@ See more details in the [ansible guide](ansible.md).
Adding nodes
------------
-You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
+You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
-- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
@@ -51,11 +51,26 @@ Remove nodes
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained; then some Kubernetes services are stopped and some certificates deleted; finally, the kubectl command is executed to delete these nodes. This can be combined with the add-node function, and is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
-- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `remove-node.yml`:
+Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+
+ ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ --private-key=~/.ssh/private_key
+
+
+We support two ways to select the nodes:
+
+- Use `--extra-vars "node=nodename,nodename2"` to select the nodes you want to delete.
```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
- --private-key=~/.ssh/private_key
+ --private-key=~/.ssh/private_key \
+ --extra-vars "node=nodename,nodename2"
+```
+or
+- Use `--limit nodename,nodename2` to select the nodes
+```
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ --private-key=~/.ssh/private_key \
+  --limit nodename,nodename2
```
Connecting to Kubernetes
diff --git a/docs/openstack.md b/docs/openstack.md
index 7a4368e2e..ca1e89ae1 100644
--- a/docs/openstack.md
+++ b/docs/openstack.md
@@ -3,7 +3,7 @@ OpenStack
To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.
-After that make sure to source in your OpenStack credentials like you would do when using `nova-client` by using `source path/to/your/openstack-rc`.
+After that, make sure to source your OpenStack credentials as you would when using `nova-client` or `neutron-client`, e.g. `source path/to/your/openstack-rc` (or `. path/to/your/openstack-rc`).
The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.
@@ -12,35 +12,34 @@ Unless you are using calico you can now run the playbook.
**Additional step needed when using calico:**
-Calico does not encapsulate all packages with the hosts ip addresses. Instead the packages will be routed with the PODs ip addresses directly.
+Calico does not encapsulate all packets with the hosts' IP addresses. Instead, the packets will be routed with the pods' IP addresses directly.
+
OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.
-In order to make calico work on OpenStack you will need to tell OpenStack to allow calicos packages by allowing the network it uses.
+In order to make calico work on OpenStack you will need to tell OpenStack to allow calico's packets by allowing the network it uses.
First you will need the ids of your OpenStack instances that will run kubernetes:
- nova list --tenant Your-Tenant
+ openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
-Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports:
+Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (these are now managed through the unified `openstack` CLI):
- neutron port-list -c id -c device_id
+ openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
-Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
-Note that you have to allow both of `kube_service_addresses` (default `10.233.0.0/18`)
-and `kube_pods_subnet` (default `10.233.64.0/18`.)
+Given the port ids on the left, you can set the `allowed-address` pairs in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).
# allow kube_service_addresses and kube_pods_subnet network
- neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
- neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
+    openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
+    openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
Now you can finally run the playbook.
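
The result can be verified per port with the same unified CLI; a minimal sketch reusing the example port id from above:

```sh
# confirm both CIDRs appear in the port's allowed address pairs
openstack port show 5662a4e0-e646-47f0-bf88-d80fbd2d99ef -c allowed_address_pairs
```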
diff --git a/docs/upgrades.md b/docs/upgrades.md
index 6297976dd..620e07a6b 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -81,3 +81,61 @@ kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted, out of an abundance of caution
about the impact on user-deployed pods.
+
+### Component-based upgrades
+
+A deployer may want to upgrade specific components in order to minimize risk
+or save time. This strategy is not covered by CI as of this writing, so it is
+not guaranteed to work.
+
+These commands are useful only for upgrading fully-deployed, healthy, existing
+hosts. This will definitely not work for undeployed or partially deployed
+hosts.
+
+Upgrade docker:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker
+```
+
+Upgrade etcd:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
+```
+
+Upgrade vault:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
+```
+
+Upgrade kubelet:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens
+```
+
+Upgrade Kubernetes master components:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master
+```
+
+Upgrade network plugins:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network
+```
+
+Upgrade all add-ons:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps
+```
+
+Upgrade just helm (assuming `helm_enabled` is true):
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm
+```
diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml
index 267c724ee..a669805c7 100644
--- a/extra_playbooks/build-cephfs-provisioner.yml
+++ b/extra_playbooks/build-cephfs-provisioner.yml
@@ -8,8 +8,8 @@
version: "{{ item.version }}"
state: "{{ item.state }}"
with_items:
- - { state: "present", name: "docker", version: "3.2.1" }
- - { state: "present", name: "docker-compose", version: "1.21.0" }
+ - { state: "present", name: "docker", version: "3.4.1" }
+ - { state: "present", name: "docker-compose", version: "1.21.2" }
- name: CephFS Provisioner | Check Go version
shell: |
@@ -35,19 +35,19 @@
- name: CephFS Provisioner | Clone repo
git:
repo: https://github.com/kubernetes-incubator/external-storage.git
- dest: "~/go/src/github.com/kubernetes-incubator"
- version: a71a49d4
- clone: no
+ dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
+ version: 06fddbe2
+ clone: yes
update: yes
- name: CephFS Provisioner | Build image
shell: |
cd ~/go/src/github.com/kubernetes-incubator/external-storage
- REGISTRY=quay.io/kubespray/ VERSION=a71a49d4 make ceph/cephfs
+ REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs
- name: CephFS Provisioner | Push image
docker_image:
- name: quay.io/kubespray/cephfs-provisioner:a71a49d4
+ name: quay.io/kubespray/cephfs-provisioner:06fddbe2
push: yes
retries: 10
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index d856d064c..65e8c6590 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -131,3 +131,6 @@ bin_dir: /usr/local/bin
# The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
#kube_read_only_port: 10255
+
+# Whether CoreOS hosts should auto-upgrade; default is true
+#coreos_auto_upgrade: true
\ No newline at end of file
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 52a9a2079..2ca718598 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -19,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.10.2
+kube_version: v1.11.2
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -67,25 +67,21 @@ kube_users:
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
-weave_mtu: 1376
+# Weave deployment
+# weave_password: ~
+# weave_checkpoint_disable: false
+# weave_conn_limit: 100
+# weave_hairpin_mode: true
+# weave_ipalloc_range: {{ kube_pods_subnet }}
+# weave_expect_npc: {{ enable_network_policy }}
+# weave_kube_peers: ~
+# weave_ipalloc_init: ~
+# weave_expose_ip: ~
+# weave_metrics_addr: ~
+# weave_status_addr: ~
+# weave_mtu: 1376
+# weave_no_masq_local: true
+# weave_extra_args: ~
# Enable kubernetes network policies
enable_network_policy: false
@@ -140,12 +136,21 @@ dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
+## Set to "true" to enable the docker daemon's --iptables option
+#docker_iptables_enabled: "true"
+
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_options: >
+ --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
+ {% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+ --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+ --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+ --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+ {% endif %}
docker_bin_dir: "/usr/bin"
## If non-empty will override default system MounFlags value.
@@ -164,6 +169,9 @@ helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
+# audit log for kubernetes
+kubernetes_audit: false
+
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
@@ -174,9 +182,6 @@ efk_enabled: false
# Helm deployment
helm_enabled: false
-# Istio deployment
-istio_enabled: false
-
# Registry deployment
registry_enabled: false
# registry_namespace: "{{ system_namespace }}"
@@ -192,19 +197,21 @@ local_volume_provisioner_enabled: false
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
-# cephfs_provisioner_namespace: "{{ system_namespace }}"
+# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
-# cephfs_provisioner_monitors:
-# - 172.24.0.1:6789
-# - 172.24.0.2:6789
-# - 172.24.0.3:6789
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
+# ingress_nginx_nodeselector:
+# node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini
index bddfa2f80..ad38aedf2 100644
--- a/inventory/sample/hosts.ini
+++ b/inventory/sample/hosts.ini
@@ -26,11 +26,6 @@
# node5
# node6
-# [kube-ingress]
-# node2
-# node3
-
# [k8s-cluster:children]
# kube-master
# kube-node
-# kube-ingress
diff --git a/library/vault_cert_issue.py b/library/vault_cert_issue.py
deleted file mode 100644
index c0d198ae3..000000000
--- a/library/vault_cert_issue.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/env python
-DOCUMENTATION = '''
----
-module: hashivault_pki_issue
-version_added: "0.1"
-short_description: Hashicorp Vault PKI issue module
-description:
- - Module to issue PKI certs from Hashicorp Vault.
-options:
- url:
- description:
- - url for vault
- default: to environment variable VAULT_ADDR
- ca_cert:
- description:
- - "path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate"
- default: to environment variable VAULT_CACERT
- ca_path:
- description:
- - "path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate : if ca_cert is specified, its value will take precedence"
- default: to environment variable VAULT_CAPATH
- client_cert:
- description:
- - "path to a PEM-encoded client certificate for TLS authentication to the Vault server"
- default: to environment variable VAULT_CLIENT_CERT
- client_key:
- description:
- - "path to an unencrypted PEM-encoded private key matching the client certificate"
- default: to environment variable VAULT_CLIENT_KEY
- verify:
- description:
- - "if set, do not verify presented TLS certificate before communicating with Vault server : setting this variable is not recommended except during testing"
- default: to environment variable VAULT_SKIP_VERIFY
- authtype:
- description:
- - "authentication type to use: token, userpass, github, ldap, approle"
- default: token
- token:
- description:
- - token for vault
- default: to environment variable VAULT_TOKEN
- username:
- description:
- - username to login to vault.
- default: to environment variable VAULT_USER
- password:
- description:
- - password to login to vault.
- default: to environment variable VAULT_PASSWORD
- secret:
- description:
- - secret to read.
- data:
- description:
- - Keys and values to write.
- update:
- description:
- - Update rather than overwrite.
- default: False
- min_ttl:
- description:
- - Issue new cert if existing cert has lower TTL expressed in hours or a percentage. Examples: 70800h, 50%
- force:
- description:
- - Force issue of new cert
-
-'''
-EXAMPLES = '''
----
-- hosts: localhost
- tasks:
- - hashivault_write:
- secret: giant
- data:
- foo: foe
- fie: fum
-'''
-
-
-def main():
- argspec = hashivault_argspec()
- argspec['secret'] = dict(required=True, type='str')
- argspec['update'] = dict(required=False, default=False, type='bool')
- argspec['data'] = dict(required=False, default={}, type='dict')
- module = hashivault_init(argspec, supports_check_mode=True)
- result = hashivault_write(module)
- if result.get('failed'):
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-def _convert_to_seconds(original_value):
- try:
- value = str(original_value)
- seconds = 0
- if 'h' in value:
- ray = value.split('h')
- seconds = int(ray.pop(0)) * 3600
- value = ''.join(ray)
- if 'm' in value:
- ray = value.split('m')
- seconds += int(ray.pop(0)) * 60
- value = ''.join(ray)
- if value:
- ray = value.split('s')
- seconds += int(ray.pop(0))
- return seconds
- except Exception:
- pass
- return original_value
-
-def hashivault_needs_refresh(old_data, min_ttl):
- print("Checking refresh")
- print_r(old_data)
- return False
-# if sorted(old_data.keys()) != sorted(new_data.keys()):
-# return True
-# for key in old_data:
-# old_value = old_data[key]
-# new_value = new_data[key]
-# if old_value == new_value:
-# continue
-# if key != 'ttl' and key != 'max_ttl':
-# return True
-# old_value = _convert_to_seconds(old_value)
-# new_value = _convert_to_seconds(new_value)
-# if old_value != new_value:
-# return True
-# return False
-#
-def hashivault_changed(old_data, new_data):
- if sorted(old_data.keys()) != sorted(new_data.keys()):
- return True
- for key in old_data:
- old_value = old_data[key]
- new_value = new_data[key]
- if old_value == new_value:
- continue
- if key != 'ttl' and key != 'max_ttl':
- return True
- old_value = _convert_to_seconds(old_value)
- new_value = _convert_to_seconds(new_value)
- if old_value != new_value:
- return True
- return False
-
-
-from ansible.module_utils.hashivault import *
-
-
-@hashiwrapper
-def hashivault_write(module):
- result = {"changed": False, "rc": 0}
- params = module.params
- client = hashivault_auth_client(params)
- secret = params.get('secret')
- force = params.get('force', False)
- min_ttl = params.get('min_ttl', "100%")
- returned_data = None
-
- if secret.startswith('/'):
- secret = secret.lstrip('/')
- #else:
- # secret = ('secret/%s' % secret)
- data = params.get('data')
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- changed = True
- write_data = data
-
- if params.get('update') or module.check_mode:
- # Do not move this read outside of the update
- read_data = client.read(secret) or {}
- read_data = read_data.get('data', {})
-
- write_data = dict(read_data)
- write_data.update(data)
-
- result['write_data'] = write_data
- result['read_data'] = read_data
- changed = hashivault_changed(read_data, write_data)
- if not changed:
- changed = hashivault_needs_refresh(read_data, min_ttl)
-
- if changed:
- if not module.check_mode:
- returned_data = client.write((secret), **write_data)
-
- if returned_data:
- result['data'] = returned_data
- result['msg'] = "Secret %s written" % secret
- result['changed'] = changed
- return result
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/remove-node.yml b/remove-node.yml
index e39432f02..0fae1a994 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -5,7 +5,7 @@
ansible_ssh_pipelining: true
gather_facts: true
-- hosts: etcd:k8s-cluster:vault:calico-rr
+- hosts: "{{ node | default('etcd:k8s-cluster:vault:calico-rr') }}"
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -22,7 +22,7 @@
roles:
- { role: remove-node/pre-remove, tags: pre-remove }
-- hosts: kube-node
+- hosts: "{{ node | default('kube-node') }}"
roles:
- { role: kubespray-defaults }
- { role: reset, tags: reset }
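
The `node` variable introduced here is what the docs change above relies on: when set, only the named hosts are drained and reset, falling back to the full `etcd:k8s-cluster:vault:calico-rr` and `kube-node` groups otherwise. A minimal sketch:

```sh
# remove exactly two nodes (names are illustrative)
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b \
  -e node=node4,node5
```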
diff --git a/roles/bootstrap-os/defaults/main.yml b/roles/bootstrap-os/defaults/main.yml
index c191ebd2b..5d2f7321a 100644
--- a/roles/bootstrap-os/defaults/main.yml
+++ b/roles/bootstrap-os/defaults/main.yml
@@ -4,3 +4,6 @@ pip_python_coreos_modules:
- six
override_system_hostname: true
+
+
+coreos_auto_upgrade: true
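
The default preserves existing behavior (CoreOS auto-upgrades stay enabled); deployments that pin OS versions can now opt out. A minimal sketch:

```sh
# keep CoreOS hosts from auto-upgrading during a deploy
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
  -e coreos_auto_upgrade=false
```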
diff --git a/roles/bootstrap-os/files/bootstrap.sh b/roles/bootstrap-os/files/bootstrap.sh
index a2ad29b6c..dbef6c8b1 100644
--- a/roles/bootstrap-os/files/bootstrap.sh
+++ b/roles/bootstrap-os/files/bootstrap.sh
@@ -18,7 +18,11 @@ mv -n pypy-$PYPY_VERSION-linux64 pypy
## library fixup
mkdir -p pypy/lib
-ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
+if [ -f /lib64/libncurses.so.5.9 ]; then
+ ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
+elif [ -f /lib64/libncurses.so.6.1 ]; then
+ ln -snf /lib64/libncurses.so.6.1 $BINDIR/pypy/lib/libtinfo.so.5
+fi
cat > $BINDIR/python << EOF
[…]
    when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool

- name: rtrim number of nameservers to 3
diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2
index e343a994e..296f5a8a1 100644
--- a/roles/docker/templates/docker-options.conf.j2
+++ b/roles/docker/templates/docker-options.conf.j2
@@ -1,6 +1,5 @@
[Service]
-Environment="DOCKER_OPTS={{ docker_options | default('') }} \
---iptables=false"
+Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}
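
With this change `--iptables` is always rendered and defaults to `false`, so behavior is unchanged unless `docker_iptables_enabled` is set. A minimal sketch for checking the rendered drop-in on a provisioned node (the drop-in path is an assumption based on the role's usual layout):

```sh
# inspect the rendered DOCKER_OPTS on a node deployed with docker_iptables_enabled: "true"
grep iptables /etc/systemd/system/docker.service.d/docker-options.conf
```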
diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml
index a17cd7575..8138996c1 100644
--- a/roles/docker/vars/debian.yml
+++ b/roles/docker/vars/debian.yml
@@ -9,6 +9,7 @@ docker_versioned_pkg:
'1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
'17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+ '17.09': docker-ce=17.09.0~ce-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}
diff --git a/roles/docker/vars/redhat-aarch64.yml b/roles/docker/vars/redhat-aarch64.yml
new file mode 100644
index 000000000..0bad0593d
--- /dev/null
+++ b/roles/docker/vars/redhat-aarch64.yml
@@ -0,0 +1,28 @@
+---
+docker_kernel_min_version: '0'
+
+# override defaults; 17.03 is missing for aarch64
+docker_version: '1.13'
+
+# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
+# or do 'yum --showduplicates list docker'
+docker_versioned_pkg:
+ 'latest': docker
+ '1.12': docker-1.12.6-48.git0fdc778.el7
+ '1.13': docker-1.13.1-63.git94f4240.el7
+
+# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
+# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
+
+docker_package_info:
+ pkg_mgr: yum
+ pkgs:
+ - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+
+docker_repo_key_info:
+ pkg_key: ''
+ repo_keys: []
+
+docker_repo_info:
+ pkg_repo: ''
+ repos: []
diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml
index cd53e284c..57970eb50 100644
--- a/roles/docker/vars/redhat.yml
+++ b/roles/docker/vars/redhat.yml
@@ -11,6 +11,7 @@ docker_versioned_pkg:
'1.12': docker-engine-1.12.6-1.el7.centos
'1.13': docker-engine-1.13.1-1.el7.centos
'17.03': docker-ce-17.03.2.ce-1.el7.centos
+ '17.09': docker-ce-17.09.0.ce-1.el7.centos
'stable': docker-ce-17.03.2.ce-1.el7.centos
'edge': docker-ce-17.12.1.ce-1.el7.centos
diff --git a/roles/docker/vars/ubuntu.yml b/roles/docker/vars/ubuntu.yml
index f4d6b1e0f..cab1c0824 100644
--- a/roles/docker/vars/ubuntu.yml
+++ b/roles/docker/vars/ubuntu.yml
@@ -8,6 +8,7 @@ docker_versioned_pkg:
'1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
'1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
'17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '17.09': docker-ce=17.09.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 26656a16d..ebc53a4be 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -27,9 +27,9 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
image_arch: amd64
# Versions
-kube_version: v1.10.2
+kube_version: v1.11.2
kubeadm_version: "{{ kube_version }}"
-etcd_version: v3.2.16
+etcd_version: v3.2.18
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v2.6.8"
@@ -39,21 +39,18 @@ calico_policy_version: "v1.0.3"
calico_rr_version: "v0.4.2"
flannel_version: "v0.10.0"
flannel_cni_version: "v0.3.0"
-istio_version: "0.2.6"
vault_version: 0.10.1
-weave_version: 2.3.0
+weave_version: "2.4.0"
pod_infra_version: 3.0
contiv_version: 1.1.7
-cilium_version: "v1.0.0-rc8"
+cilium_version: "v1.1.2"
# Download URLs
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
# Checksums
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-kubeadm_checksum: 394d7d340214c91d669186cf4f2110d8eb840ca965399b4d8b22d0545a60e377
+kubeadm_checksum: 6b17720a65b8ff46efe92a5544f149c39a221910d89939838d75581d4e6924c0
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
# Containers
@@ -73,22 +70,6 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
-istio_proxy_image_repo: docker.io/istio/proxy
-istio_proxy_image_tag: "{{ istio_version }}"
-istio_proxy_init_image_repo: docker.io/istio/proxy_init
-istio_proxy_init_image_tag: "{{ istio_version }}"
-istio_ca_image_repo: docker.io/istio/istio-ca
-istio_ca_image_tag: "{{ istio_version }}"
-istio_mixer_image_repo: docker.io/istio/mixer
-istio_mixer_image_tag: "{{ istio_version }}"
-istio_pilot_image_repo: docker.io/istio/pilot
-istio_pilot_image_tag: "{{ istio_version }}"
-istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
-istio_proxy_debug_image_tag: "{{ istio_version }}"
-istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
-istio_sidecar_initializer_image_tag: "{{ istio_version }}"
-istio_statsd_image_repo: prom/statsd-exporter
-istio_statsd_image_tag: latest
hyperkube_image_repo: "gcr.io/google-containers/hyperkube-{{ image_arch }}"
hyperkube_image_tag: "{{ kube_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
@@ -120,7 +101,7 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}"
kubedns_version: 1.14.10
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
kubedns_image_tag: "{{ kubedns_version }}"
-coredns_version: 1.1.2
+coredns_version: 1.2.0
coredns_image_repo: "docker.io/coredns/coredns"
coredns_image_tag: "{{ coredns_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny--{{ image_arch }}"
@@ -135,14 +116,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
-elasticsearch_version: "v2.4.1"
-elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
+elasticsearch_version: "v5.6.4"
+elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
-fluentd_version: "1.22"
-fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
+fluentd_version: "v2.0.4"
+fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
-kibana_version: "v4.6.1"
-kibana_image_repo: "gcr.io/google_containers/kibana"
+kibana_version: "5.6.4"
+kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
@@ -156,18 +137,16 @@ registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
-local_volume_provisioner_image_tag: "v2.0.0"
-cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
-cephfs_provisioner_image_tag: "a71a49d4"
+local_volume_provisioner_image_tag: "v2.1.0"
+cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
+cephfs_provisioner_image_tag: "v1.1.0-k8s1.10"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
-ingress_nginx_controller_image_tag: "0.14.0"
+ingress_nginx_controller_image_tag: "0.18.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.4"
+cert_manager_version: "v0.4.1"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
-cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
-cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:
@@ -207,83 +186,6 @@ downloads:
mode: "0755"
groups:
- k8s-cluster
- istioctl:
- enabled: "{{ istio_enabled }}"
- file: true
- version: "{{ istio_version }}"
- dest: "istio/istioctl"
- sha256: "{{ istioctl_checksum }}"
- source_url: "{{ istioctl_download_url }}"
- url: "{{ istioctl_download_url }}"
- unarchive: false
- owner: "root"
- mode: "0755"
- groups:
- - kube-master
- istio_proxy:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_proxy_image_repo }}"
- tag: "{{ istio_proxy_image_tag }}"
- sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_proxy_init:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_proxy_init_image_repo }}"
- tag: "{{ istio_proxy_init_image_tag }}"
- sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_ca:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_ca_image_repo }}"
- tag: "{{ istio_ca_image_tag }}"
- sha256: "{{ istio_ca_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_mixer:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_mixer_image_repo }}"
- tag: "{{ istio_mixer_image_tag }}"
- sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_pilot:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_pilot_image_repo }}"
- tag: "{{ istio_pilot_image_tag }}"
- sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_proxy_debug:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_proxy_debug_image_repo }}"
- tag: "{{ istio_proxy_debug_image_tag }}"
- sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_sidecar_initializer:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_sidecar_initializer_image_repo }}"
- tag: "{{ istio_sidecar_initializer_image_tag }}"
- sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
- groups:
- - kube-node
- istio_statsd:
- enabled: "{{ istio_enabled }}"
- container: true
- repo: "{{ istio_statsd_image_repo }}"
- tag: "{{ istio_statsd_image_tag }}"
- sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
- groups:
- - kube-node
hyperkube:
enabled: true
container: true
@@ -569,7 +471,7 @@ downloads:
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
groups:
- - kube-ingress
+ - kube-node
ingress_nginx_default_backend:
enabled: "{{ ingress_nginx_enabled }}"
container: true
@@ -577,7 +479,7 @@ downloads:
tag: "{{ ingress_nginx_default_backend_image_tag }}"
sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
groups:
- - kube-ingress
+ - kube-node
cert_manager_controller:
enabled: "{{ cert_manager_enabled }}"
container: true
@@ -586,14 +488,6 @@ downloads:
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
- cert_manager_ingress_shim:
- enabled: "{{ cert_manager_enabled }}"
- container: true
- repo: "{{ cert_manager_ingress_shim_image_repo }}"
- tag: "{{ cert_manager_ingress_shim_image_tag }}"
- sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
- groups:
- - kube-node
download_defaults:
container: false
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 2474b4029..6a317fd89 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -20,6 +20,6 @@
when:
- not skip_downloads|default(false)
- item.value.enabled
- - item.value.container
+ - "{{ item.value.container | default(False) }}"
- download_run_once
- group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 7a9b73e38..87a73cfbd 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -9,7 +9,7 @@
- name: Register docker images info
raw: >-
- {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
+ {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} if .RepoTags {{ '}}' }}{{ '{{' }} (index .RepoTags 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}{{ '{{' }} if .RepoDigests {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}" | tr '\n' ','
no_log: true
register: docker_images
failed_when: false
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 041214903..750b710f2 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -3,6 +3,9 @@
etcd_cluster_setup: true
etcd_events_cluster_setup: false
+# Set to true to store k8s events in a separate etcd cluster
+etcd_events_cluster_enabled: false
+
etcd_backup_prefix: "/var/backups"
etcd_data_dir: "/var/lib/etcd"
etcd_events_data_dir: "/var/lib/etcd-events"
diff --git a/roles/etcd/files/make-ssl-etcd.sh b/roles/etcd/files/make-ssl-etcd.sh
index ebf0e2afa..d661a2a0d 100755
--- a/roles/etcd/files/make-ssl-etcd.sh
+++ b/roles/etcd/files/make-ssl-etcd.sh
@@ -95,4 +95,9 @@ if [ -n "$HOSTS" ]; then
fi
# Install certs
+if [ -e "$SSLDIR/ca-key.pem" ]; then
+ # An existing CA takes precedence: discard the newly generated CA files
+ rm -f ca.pem ca-key.pem
+fi
+
mv *.pem ${SSLDIR}/
diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml
index 1f0f67149..aa3274bd7 100644
--- a/roles/etcd/tasks/gen_certs_vault.yml
+++ b/roles/etcd/tasks/gen_certs_vault.yml
@@ -62,5 +62,3 @@
with_items: "{{ etcd_node_certs_needed|d([]) }}"
when: inventory_hostname in etcd_node_cert_hosts
notify: set etcd_secret_changed
-
-- fail:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c35a9cab6..38df04d73 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -19,11 +19,17 @@
register: "etcd_client_cert_serial_result"
changed_when: false
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
+ tags:
+ - master
+ - network
- name: Set etcd_client_cert_serial
set_fact:
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout }}"
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
+ tags:
+ - master
+ - network
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master
diff --git a/roles/etcd/tasks/sync_etcd_master_certs.yml b/roles/etcd/tasks/sync_etcd_master_certs.yml
index ff33f0a24..3990e569d 100644
--- a/roles/etcd/tasks/sync_etcd_master_certs.yml
+++ b/roles/etcd/tasks/sync_etcd_master_certs.yml
@@ -8,13 +8,15 @@
"member-" + inventory_hostname + ".pem"
] }}
-#- include_tasks: ../../vault/tasks/shared/sync_file.yml
-# vars:
-# sync_file: "{{ item }}"
-# sync_file_dir: "{{ etcd_cert_dir }}"
-# sync_file_hosts: [ "{{ inventory_hostname }}" ]
-# sync_file_is_cert: true
-# with_items: "{{ etcd_master_cert_list|d([]) }}"
+- include_tasks: ../../vault/tasks/shared/sync_file.yml
+ vars:
+ sync_file: "{{ item }}"
+ sync_file_dir: "{{ etcd_cert_dir }}"
+ sync_file_hosts: [ "{{ inventory_hostname }}" ]
+ sync_file_owner: kube
+ sync_file_group: root
+ sync_file_is_cert: true
+ with_items: "{{ etcd_master_cert_list|d([]) }}"
- name: sync_etcd_certs | Set facts for etcd sync_file results
set_fact:
@@ -22,16 +24,16 @@
with_items: "{{ sync_file_results|d([]) }}"
when: item.no_srcs|bool
-#- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
-# set_fact:
-# sync_file_results: []
-#
-#- include_tasks: ../../vault/tasks/shared/sync_file.yml
-# vars:
-# sync_file: ca.pem
-# sync_file_dir: "{{ etcd_cert_dir }}"
-# sync_file_hosts: [ "{{ inventory_hostname }}" ]
-#
-#- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
-# set_fact:
-# sync_file_results: []
+- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
+ set_fact:
+ sync_file_results: []
+
+- include_tasks: ../../vault/tasks/shared/sync_file.yml
+ vars:
+ sync_file: ca.pem
+ sync_file_dir: "{{ etcd_cert_dir }}"
+ sync_file_hosts: [ "{{ inventory_hostname }}" ]
+
+- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
+ set_fact:
+ sync_file_results: []
diff --git a/roles/etcd/tasks/sync_etcd_node_certs.yml b/roles/etcd/tasks/sync_etcd_node_certs.yml
index ffb9e73b8..3e075364f 100644
--- a/roles/etcd/tasks/sync_etcd_node_certs.yml
+++ b/roles/etcd/tasks/sync_etcd_node_certs.yml
@@ -4,30 +4,30 @@
set_fact:
etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"
-#- include_tasks: ../../vault/tasks/shared/sync_file.yml
-# vars:
-# sync_file: "{{ item }}"
-# sync_file_dir: "{{ etcd_cert_dir }}"
-# sync_file_hosts: [ "{{ inventory_hostname }}" ]
-# sync_file_is_cert: true
-# with_items: "{{ etcd_node_cert_list|d([]) }}"
-#
+- include_tasks: ../../vault/tasks/shared/sync_file.yml
+ vars:
+ sync_file: "{{ item }}"
+ sync_file_dir: "{{ etcd_cert_dir }}"
+ sync_file_hosts: [ "{{ inventory_hostname }}" ]
+ sync_file_is_cert: true
+ with_items: "{{ etcd_node_cert_list|d([]) }}"
+
- name: sync_etcd_node_certs | Set facts for etcd sync_file results
set_fact:
etcd_node_certs_needed: "{{ etcd_node_certs_needed|default([]) + [item.path] }}"
with_items: "{{ sync_file_results|d([]) }}"
when: item.no_srcs|bool
-#- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
-# set_fact:
-# sync_file_results: []
-#
-#- include_tasks: ../../vault/tasks/shared/sync_file.yml
-# vars:
-# sync_file: ca.pem
-# sync_file_dir: "{{ etcd_cert_dir }}"
-# sync_file_hosts: "{{ groups['etcd'] }}"
-#
-#- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
-# set_fact:
-# sync_file_results: []
+- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
+ set_fact:
+ sync_file_results: []
+
+- include_tasks: ../../vault/tasks/shared/sync_file.yml
+ vars:
+ sync_file: ca.pem
+ sync_file_dir: "{{ etcd_cert_dir }}"
+ sync_file_hosts: "{{ groups['etcd'] }}"
+
+- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
+ set_fact:
+ sync_file_results: []
diff --git a/roles/etcd/templates/etcd-events-rkt.service.j2 b/roles/etcd/templates/etcd-events-rkt.service.j2
new file mode 100644
index 000000000..7886a038b
--- /dev/null
+++ b/roles/etcd/templates/etcd-events-rkt.service.j2
@@ -0,0 +1,31 @@
+[Unit]
+Description=etcd events rkt wrapper
+Documentation=https://github.com/coreos/etcd
+Wants=network.target
+
+[Service]
+Restart=on-failure
+RestartSec=10s
+TimeoutStartSec=0
+LimitNOFILE=40000
+
+ExecStart=/usr/bin/rkt run \
+--uuid-file-save=/var/run/etcd-events.uuid \
+--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
+--mount volume=hosts,target=/etc/hosts \
+--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
+--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
+--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
+--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
+--volume=etcd-data-dir,kind=host,source={{ etcd_events_data_dir }},readOnly=false \
+--mount=volume=etcd-data-dir,target={{ etcd_events_data_dir }} \
+--set-env-file=/etc/etcd-events.env \
+--stage1-from-dir=stage1-fly.aci \
+{{ etcd_image_repo }}:{{ etcd_image_tag }} \
+--name={{ etcd_member_name | default("etcd-events") }}
+
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd-events.uuid
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd-events.uuid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index ad5d7b303..e9ed3de34 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -60,6 +60,9 @@ dashboard_certs_secret_name: kubernetes-dashboard-certs
dashboard_tls_key_file: dashboard.key
dashboard_tls_cert_file: dashboard.crt
+# Override dashboard default settings
+dashboard_token_ttl: 900
+
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
diff --git a/roles/kubernetes-apps/ansible/tasks/kubedns.yml b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
index c4c34ecf8..b2199d446 100644
--- a/roles/kubernetes-apps/ansible/tasks/kubedns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
@@ -19,6 +19,7 @@
- rbac_enabled or item.type not in rbac_resources
tags:
- dnsmasq
+ - kubedns
# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
@@ -39,3 +40,4 @@
- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
tags:
- dnsmasq
+ - kubedns
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index ceb667f69..62169d27d 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -17,6 +17,9 @@
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
+ - dnsmasq
+ - coredns
+ - kubedns
- name: Kubernetes Apps | CoreDNS
import_tasks: "tasks/coredns.yml"
@@ -56,6 +59,8 @@
delay: 5
tags:
- dnsmasq
+ - coredns
+ - kubedns
- name: Kubernetes Apps | Netchecker
import_tasks: tasks/netchecker.yml
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index c07f07bba..0a133abb5 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Check if netchecker-server manifest already exists
stat:
- path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2"
+ path: "{{ kube_config_dir }}/netchecker-server-deployment.yml"
register: netchecker_server_manifest
tags:
- facts
@@ -22,16 +22,16 @@
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
- src: "{{item.file}}"
+ src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- - {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- - {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- - {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
- - {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
- - {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
- - {file: netchecker-server-deployment.yml.j2, type: deployment, name: netchecker-server}
- - {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
+ - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
+ - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
+ - {file: netchecker-server-sa.yml, type: sa, name: netchecker-server}
+ - {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server}
+ - {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
+ - {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
+ - {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
index 360480c1e..34cd4b77e 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -11,7 +11,7 @@ data:
.:53 {
errors
health
- kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
+ kubernetes {{ dns_domain }} in-addr.arpa ip6.arpa {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
index 5cba6f1f0..dc1f51937 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -34,6 +34,22 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
containers:
- name: coredns
image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index 860a5c26f..41f6716e7 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -166,6 +166,7 @@ spec:
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
+ - --token-ttl={{ dashboard_token_ttl }}
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index d7c30eceb..73ae3a01a 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -30,7 +30,24 @@ spec:
spec:
tolerations:
- effect: NoSchedule
- operator: Exists
+ operator: Equal
+ key: node-role.kubernetes.io/master
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: kubedns-autoscaler
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
containers:
- name: autoscaler
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index cfce65f0e..bb040780f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -30,8 +30,25 @@ spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- - effect: NoSchedule
- operator: Exists
+ - effect: "NoSchedule"
+ operator: "Equal"
+ key: "node-role.kubernetes.io/master"
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
volumes:
- name: kube-dns-config
configMap:
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index dd5b9b630..4b9ab0067 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -1,9 +1,12 @@
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index 75d75f650..01e774e96 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -6,3 +6,4 @@ metadata:
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 4cdcf33ad..51666c1f2 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -1,15 +1,17 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
-kind: Deployment
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
metadata:
- name: elasticsearch-logging-v1
+ name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:
@@ -53,4 +55,10 @@ spec:
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
+ initContainers:
+ - image: alpine:3.6
+ command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+ name: elasticsearch-logging-init
+ securityContext:
+ privileged: true
diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
index e8d93732c..0305a5f7a 100644
--- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
@@ -1,7 +1,7 @@
---
fluentd_cpu_limit: 0m
-fluentd_mem_limit: 200Mi
+fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
-fluentd_config_dir: /etc/kubernetes/fluentd
-fluentd_config_file: fluentd.conf
+fluentd_config_dir: /etc/fluent/config.d
+# fluentd_config_file: fluentd.conf
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index b7de44dc0..0b0229f69 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -1,10 +1,19 @@
+---
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: "kube-system"
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
data:
- {{ fluentd_config_file }}: |
+    system.conf: |-
+      <system>
+        root_dir /tmp/fluentd-buffers/
+      </system>
+
+ containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
@@ -18,7 +27,6 @@ data:
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
- # Maintainer: Jimmi Dyson
#
# Example
# =======
@@ -99,63 +107,87 @@ data:
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
      # Kubernetes pod has been restarted (resulting in several Docker container IDs).
- #
- # TODO: Propagate the labels associated with a container along with its logs
- # so users can query logs using labels as well as or instead of the pod name
- # and container name. This is simply done via configuration of the Kubernetes
- # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
- # problem yet to be solved as secrets are not usable in static pods which the fluentd
- # pod must be until a per-node controller is available in Kubernetes.
- # Prevent fluentd from handling records containing its own logs. Otherwise
- # it can lead to an infinite loop, when error in sending one message generates
- # another message which also fails to be sent and so on.
-    <match fluent.**>
-      type null
-    </match>
- # Example:
+
+ # Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+ # CRI Log Example:
+ # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
- type tail
+ @id fluentd-containers.log
+ @type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
- tag kubernetes.*
- format json
+ tag raw.kubernetes.*
read_from_head true
+
+ @type multi_format
+
+ format json
+ time_key time
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+
+
+ format /^(?
+
+
+ # Detect exceptions in the log output and forward them as one log entry.
+      <match raw.kubernetes.**>
+        @id raw.kubernetes
+        @type detect_exceptions
+        remove_tag_prefix raw
+        message log
+        stream stream
+        multiline_flush_interval 5
+        max_bytes 500000
+        max_lines 1000
+      </match>
+
+ system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
- type tail
+ @id minion
+ @type tail
format /^(?
+
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
- type tail
+ @id startupscript.log
+ @type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
+
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+ # TODO(random-liu): Remove this after cri container runtime rolls out.
- type tail
+ @id docker.log
+ @type tail
format /^time="(?
+
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
- type tail
+ @id etcd.log
+ @type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@@ -163,13 +195,16 @@ data:
pos_file /var/log/es-etcd.log.pos
tag etcd
+
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
+
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
- type tail
+ @id kubelet.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -179,10 +214,12 @@ data:
pos_file /var/log/es-kubelet.log.pos
tag kubelet
+
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
- type tail
+ @id kube-proxy.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -192,10 +229,12 @@ data:
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
+
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
- type tail
+ @id kube-apiserver.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -205,10 +244,12 @@ data:
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
+
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
- type tail
+ @id kube-controller-manager.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -218,10 +259,12 @@ data:
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
+
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
- type tail
+ @id kube-scheduler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -231,10 +274,12 @@ data:
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
+
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
- type tail
+ @id rescheduler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -244,10 +289,12 @@ data:
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
- type tail
+ @id glbc.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -257,10 +304,12 @@ data:
pos_file /var/log/es-glbc.log.pos
tag glbc
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
- type tail
+ @id cluster-autoscaler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -270,59 +319,123 @@ data:
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
+
+      # Logs from systemd-journal for interesting services.
+      # TODO(random-liu): Remove this after cri container runtime rolls out.
+      <source>
+        @id journald-docker
+        @type systemd
+        filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+        <storage>
+          @type local
+          persistent true
+        </storage>
+        read_from_head true
+        tag docker
+      </source>
+
+      # <source>
+      #   @id journald-container-runtime
+      #   @type systemd
+      #   filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }}{% endraw %}.service" }]
+      #   <storage>
+      #     @type local
+      #     persistent true
+      #   </storage>
+      #   read_from_head true
+      #   tag container-runtime
+      # </source>
+
+      <source>
+        @id journald-kubelet
+        @type systemd
+        filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+        <storage>
+          @type local
+          persistent true
+        </storage>
+        read_from_head true
+        tag kubelet
+      </source>
+
+      <source>
+        @id journald-node-problem-detector
+        @type systemd
+        filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+        <storage>
+          @type local
+          persistent true
+        </storage>
+        read_from_head true
+        tag node-problem-detector
+      </source>
+
+    forward.input.conf: |-
+      # Takes the messages sent over TCP
+      <source>
+        @type forward
+      </source>
+
+    monitoring.conf: |-
+      # Prometheus Exporter Plugin
+      # input plugin that exports metrics
+      <source>
+        @type prometheus
+      </source>
+
+      <source>
+        @type monitor_agent
+      </source>
+
+      # input plugin that collects metrics from MonitorAgent
+      <source>
+        @type prometheus_monitor
+        <labels>
+          host ${hostname}
+        </labels>
+      </source>
+
+      # input plugin that collects metrics for output plugin
+      <source>
+        @type prometheus_output_monitor
+        <labels>
+          host ${hostname}
+        </labels>
+      </source>
+
+      # input plugin that collects metrics for in_tail plugin
+      <source>
+        @type prometheus_tail_monitor
+        <labels>
+          host ${hostname}
+        </labels>
+      </source>
+
+    output.conf: |-
+      # Enriches records with Kubernetes metadata
-    <filter kubernetes.**>
-      type kubernetes_metadata
-    </filter>
+      <filter kubernetes.**>
+        @type kubernetes_metadata
+      </filter>
-
-      ## Prometheus Exporter Plugin
-      ## input plugin that exports metrics
-      # <source>
-      # type prometheus
-      # </source>
-      # <source>
-      # type monitor_agent
-      # </source>
-      # <source>
-      # type forward
-      # </source>
-      ## input plugin that collects metrics from MonitorAgent
-      # <source>
-      # @type prometheus_monitor
-      # <labels>
-      # host ${hostname}
-      # </labels>
-      # </source>
-      ## input plugin that collects metrics for output plugin
-      # <source>
-      # @type prometheus_output_monitor
-      # <labels>
-      # host ${hostname}
-      # </labels>
-      # </source>
-      ## input plugin that collects metrics for in_tail plugin
-      # <source>
-      # @type prometheus_tail_monitor
-      # <labels>
-      # host ${hostname}
-      # </labels>
-      # </source>
+
-    <match **>
-      type elasticsearch
-      user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
-      password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
-      log_level info
-      include_tag_key true
-      host elasticsearch-logging
-      port 9200
-      logstash_format true
-      # Set the chunk limit the same as for fluentd-gcp.
-      buffer_chunk_limit 2M
-      # Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
-      buffer_queue_limit 32
-      flush_interval 5s
-      # Never wait longer than 5 minutes between retries.
-      max_retry_wait 30
-      # Disable the limit on the number of retries (retry forever).
-      disable_retry_limit
-      # Use multiple threads for processing.
-      num_threads 8
-    </match>
+      <match **>
+        @id elasticsearch
+        @type elasticsearch
+        @log_level info
+        include_tag_key true
+        host elasticsearch-logging
+        port 9200
+        logstash_format true
+        <buffer>
+          @type file
+          path /var/log/fluentd-buffers/kubernetes.system.buffer
+          flush_mode interval
+          retry_type exponential_backoff
+          flush_thread_count 2
+          flush_interval 5s
+          retry_forever
+          retry_max_interval 30
+          chunk_limit_size 2M
+          queue_limit_length 8
+          overflow_action block
+        </buffer>
+      </match>
\ No newline at end of file
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index f23a8851c..6405f0cc9 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -1,32 +1,42 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
+apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: "fluentd-es-v{{ fluentd_version }}"
+ name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ version: "{{ fluentd_version }}"
+ # This annotation ensures that fluentd does not get evicted if the node
+ # supports critical pod annotation based priority scheme.
+ # Note that this does not guarantee admission on the nodes (#40573).
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
- tolerations:
- - effect: NoSchedule
- operator: Exists
+ priorityClassName: system-node-critical
+{% if rbac_enabled %}
+ serviceAccountName: efk
+{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
- command:
- - '/bin/sh'
- - '-c'
- - '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
+ env:
+ - name: FLUENTD_ARGS
+ value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -34,27 +44,24 @@ spec:
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
- cpu: {{ fluentd_cpu_requests }}
+ cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- - name: config
+ - name: config-volume
mountPath: "{{ fluentd_config_dir }}"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- - name: config
- configMap:
+ - name: config-volume
+ configMap:
name: fluentd-config
-{% if rbac_enabled %}
- serviceAccountName: efk
-{% endif %}
diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
index 0651a032d..c76e3e710 100644
--- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
@@ -4,3 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
+kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c5603d389..880482d4d 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -1,6 +1,6 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
+apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@@ -36,10 +36,12 @@ spec:
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
-{% if kibana_base_url is defined and kibana_base_url != "" %}
- - name: "KIBANA_BASE_URL"
+ - name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
-{% endif %}
+ - name: XPACK_MONITORING_ENABLED
+ value: "false"
+ - name: XPACK_SECURITY_ENABLED
+ value: "false"
ports:
- containerPort: 5601
name: ui
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
index aa1bbcf83..577fbff1e 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
@@ -1,7 +1,10 @@
---
-cephfs_provisioner_namespace: "kube-system"
+cephfs_provisioner_namespace: "cephfs-provisioner"
cephfs_provisioner_cluster: ceph
-cephfs_provisioner_monitors: []
+cephfs_provisioner_monitors: ~
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_storage_class: cephfs
+cephfs_provisioner_reclaim_policy: Delete
+cephfs_provisioner_claim_root: /volumes
+cephfs_provisioner_deterministic_names: true
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index c1fdc624c..f526e95cd 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -1,5 +1,32 @@
---
+- name: CephFS Provisioner | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: CephFS Provisioner | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: CephFS Provisioner | Remove legacy storageclass
+ shell: |
+ {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
- name: CephFS Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
@@ -7,22 +34,24 @@
owner: root
group: root
mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items:
- - { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
- - { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- - { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- - { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- - { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- - { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
- - { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- - { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
- register: cephfs_manifests
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
+ - { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
+ - { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
+ - { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
+ - { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
+ - { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
+ - { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: rs }
+ - { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
+ register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Apply manifests
@@ -33,5 +62,5 @@
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
- with_items: "{{ cephfs_manifests.results }}"
+ with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
similarity index 98%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
index 976f29c05..b39faab14 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
@@ -1,6 +1,6 @@
---
apiVersion: apps/v1
-kind: ReplicaSet
+kind: Deployment
metadata:
name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
namespace: {{ cephfs_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
similarity index 52%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
index 6ada523cb..dd0e37eb5 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
@@ -4,9 +4,12 @@ kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
+reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
- monitors: {{ cephfs_provisioner_monitors | join(',') }}
+ monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
- adminSecretName: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
+ adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
+ claimRoot: {{ cephfs_provisioner_claim_root }}
+ deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
similarity index 70%
rename from roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
index 796e30b81..6d73c0c15 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
@@ -2,7 +2,7 @@
kind: Secret
apiVersion: v1
metadata:
- name: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
+ name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
type: Opaque
data:
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
index 458a483cb..900694795 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
@@ -46,18 +46,20 @@ to limit the quota of persistent volumes.
### Simple directories
-``` bash
-for vol in vol6 vol7 vol8; do
-mkdir /mnt/disks/$vol
-done
-```
-
-This is also acceptable in a development environment, but there is no capacity
+In a development environment, `mount --bind` also works, but there is no capacity
management.
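+
+As a minimal sketch of the bind-mount approach (assuming `/mnt/disks` is the
+configured discovery directory; the backing paths are illustrative):
+
+```bash
+# Back each "volume" with a plain directory and bind-mount it into the
+# discovery directory so the provisioner detects it.
+for vol in vol1 vol2 vol3; do
+  mkdir -p /srv/local-volumes/$vol /mnt/disks/$vol
+  mount --bind /srv/local-volumes/$vol /mnt/disks/$vol
+done
+```
+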
+### Block volumeMode PVs
+
+Create a symbolic link under the discovery directory to the block device on the node. To use
+raw block devices in pods, the BlockVolume feature gate must be enabled.
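+
+For example (assuming `/mnt/disks` is the discovery directory and `/dev/sdb`
+is an unused block device on the node):
+
+```bash
+# Expose a raw block device to the provisioner via a symlink
+# in the discovery directory.
+ln -s /dev/sdb /mnt/disks/sdb
+```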
+
Usage notes
-----------
+The beta PV.NodeAffinity field is used by default. If running against an older K8s
+version, the useAlphaAPI flag must be set in the configMap.
+
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
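+
+A hedged example (assuming the daemonset is named `local-volume-provisioner`
+and runs in the `kube-system` namespace; adjust to your deployment):
+
+```bash
+# Find the provisioner pod on the node where volumes were just created,
+# then delete it; the daemonset recreates it and re-reads the sizes.
+kubectl -n kube-system get pods -o wide | grep local-volume-provisioner
+kubectl -n kube-system delete pod <pod-on-that-node>
+```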
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
index 80a74f5f1..cc73e073d 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
@@ -19,6 +19,9 @@ spec:
version: {{ local_volume_provisioner_image_tag }}
spec:
serviceAccountName: local-volume-provisioner
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
containers:
- name: provisioner
image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
@@ -30,12 +33,17 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
+ - name: MY_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
volumeMounts:
- name: local-volume-provisioner
mountPath: /etc/provisioner/config
readOnly: true
- name: local-volume-provisioner-hostpath-mnt-disks
mountPath: {{ local_volume_provisioner_mount_dir }}
+ mountPropagation: "HostToContainer"
volumes:
- name: local-volume-provisioner
configMap:
diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml
index 0bc22739c..2e8174521 100644
--- a/roles/kubernetes-apps/helm/defaults/main.yml
+++ b/roles/kubernetes-apps/helm/defaults/main.yml
@@ -18,3 +18,6 @@ helm_skip_refresh: false
# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"
+
+# Limit the maximum number of revisions saved per release. Use 0 for no limit.
+# tiller_max_history: 0
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index e7b387944..7e400d3fe 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -34,6 +34,7 @@
{% if rbac_enabled %} --service-account=tiller{% endif %}
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
+ {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
- name: Helm | Set up bash completion
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
index bc6bceb15..5136cad53 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
@@ -1,6 +1,2 @@
---
cert_manager_namespace: "cert-manager"
-cert_manager_cpu_requests: 10m
-cert_manager_cpu_limits: 30m
-cert_manager_memory_requests: 32Mi
-cert_manager_memory_limits: 200Mi
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index eeb29da2d..d8ca7ad17 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -1,5 +1,23 @@
---
+- name: Cert Manager | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/cert_manager"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: Cert Manager | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
- name: Cert Manager | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cert_manager"
@@ -7,20 +25,22 @@
owner: root
group: root
mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
- name: Cert Manager | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
with_items:
- - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
- - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
- - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
- - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
- - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
- - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
- - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: sa-cert-manager, file: sa-cert-manager.yml, type: sa }
+ - { name: crd-certificate, file: crd-certificate.yml, type: crd }
+ - { name: crd-clusterissuer, file: crd-clusterissuer.yml, type: crd }
+ - { name: crd-issuer, file: crd-issuer.yml, type: crd }
+ - { name: clusterrole-cert-manager, file: clusterrole-cert-manager.yml, type: clusterrole }
+ - { name: clusterrolebinding-cert-manager, file: clusterrolebinding-cert-manager.yml, type: clusterrolebinding }
+ - { name: deploy-cert-manager, file: deploy-cert-manager.yml, type: deploy }
register: cert_manager_manifests
when:
- inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
similarity index 96%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
index ce6aa48bf..0ce11fb9b 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
rules:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
similarity index 92%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
index d1e26e462..7dd567fd9 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
roleRef:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
similarity index 92%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
index 0d27800b3..a1663c64d 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: certificates.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
similarity index 91%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
index 8ac64e35f..869d4d260 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: clusterissuers.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
similarity index 91%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
index a11386d10..1946b81bf 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: issuers.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
similarity index 51%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
index 7fe98407b..0221be562 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
@@ -6,15 +6,19 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
spec:
replicas: 1
+ selector:
+ matchLabels:
+ app: cert-manager
+ release: cert-manager
template:
metadata:
labels:
- k8s-app: cert-manager
+ app: cert-manager
release: cert-manager
annotations:
spec:
@@ -25,6 +29,7 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --cluster-resource-namespace=$(POD_NAMESPACE)
+ - --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: POD_NAMESPACE
valueFrom:
@@ -32,20 +37,5 @@ spec:
fieldPath: metadata.namespace
resources:
requests:
- cpu: {{ cert_manager_cpu_requests }}
- memory: {{ cert_manager_memory_requests }}
- limits:
- cpu: {{ cert_manager_cpu_limits }}
- memory: {{ cert_manager_memory_limits }}
-
- - name: ingress-shim
- image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
- imagePullPolicy: {{ k8s_image_pull_policy }}
- resources:
- requests:
- cpu: {{ cert_manager_cpu_requests }}
- memory: {{ cert_manager_memory_requests }}
- limits:
- cpu: {{ cert_manager_cpu_limits }}
- memory: {{ cert_manager_memory_limits }}
-
+ cpu: 10m
+ memory: 32Mi
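Three upstream changes drive this hunk: cert-manager folded the ingress-shim sidecar into the controller binary (so the second container disappears, along with its variable-driven resources, which are now hardcoded to modest requests with no limits); leader election is confined to the release namespace via the new flag; and the pod labels switch from `k8s-app` to `app`, which is why an explicit `spec.selector` has to be added to keep matching the template. Roughly what the relevant rendered excerpt looks like (illustrative):

```yaml
# Rendered excerpt (illustrative). For apps/v1 workloads, spec.selector
# is mandatory and immutable, and it must match the pod template labels.
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cert-manager
      release: cert-manager
  template:
    metadata:
      labels:
        app: cert-manager
        release: cert-manager
```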
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
similarity index 86%
rename from roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
rename to roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
index 1a67bf6a4..c5270e88b 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
@@ -6,6 +6,6 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.4.1
release: cert-manager
heritage: Tiller
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
index ff1217809..8acee53eb 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
@@ -1,6 +1,8 @@
---
ingress_nginx_namespace: "ingress-nginx"
ingress_nginx_host_network: false
+ingress_nginx_nodeselector:
+ node-role.kubernetes.io/master: "true"
ingress_nginx_insecure_port: 80
ingress_nginx_secure_port: 443
ingress_nginx_configmap: {}
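The new `ingress_nginx_nodeselector` default pins the controller DaemonSet to masters, replacing the `node-role.kubernetes.io/ingress: "true"` selector that was previously hardcoded in the template (removed in the DaemonSet hunk further down). Being a plain dict variable, it can now be overridden per inventory, for example:

```yaml
# group_vars override (illustrative): run ingress controllers on nodes
# carrying a dedicated ingress label instead of on the masters.
ingress_nginx_nodeselector:
  node-role.kubernetes.io/ingress: "true"
```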
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
index 0a37e94cd..eff3c7ed8 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
@@ -1,5 +1,23 @@
---
+- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/ingress_nginx"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: NGINX Ingress Controller | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
- name: NGINX Ingress Controller | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/ingress_nginx"
@@ -7,24 +25,26 @@
owner: root
group: root
mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
- name: NGINX Ingress Controller | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}"
with_items:
- - { name: ingress-nginx-ns, file: ingress-nginx-ns.yml, type: ns }
- - { name: ingress-nginx-sa, file: ingress-nginx-sa.yml, type: sa }
- - { name: ingress-nginx-role, file: ingress-nginx-role.yml, type: role }
- - { name: ingress-nginx-rolebinding, file: ingress-nginx-rolebinding.yml, type: rolebinding }
- - { name: ingress-nginx-clusterrole, file: ingress-nginx-clusterrole.yml, type: clusterrole }
- - { name: ingress-nginx-clusterrolebinding, file: ingress-nginx-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: ingress-nginx-cm, file: ingress-nginx-cm.yml, type: cm }
- - { name: ingress-nginx-tcp-servicecs-cm, file: ingress-nginx-tcp-servicecs-cm.yml, type: cm }
- - { name: ingress-nginx-udp-servicecs-cm, file: ingress-nginx-udp-servicecs-cm.yml, type: cm }
- - { name: ingress-nginx-default-backend-svc, file: ingress-nginx-default-backend-svc.yml, type: svc }
- - { name: ingress-nginx-default-backend-rs, file: ingress-nginx-default-backend-rs.yml, type: rs }
- - { name: ingress-nginx-controller-ds, file: ingress-nginx-controller-ds.yml, type: ds }
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: deploy-default-backend, file: deploy-default-backend.yml, type: deploy }
+ - { name: svc-default-backend, file: svc-default-backend.yml, type: svc }
+ - { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm }
+ - { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm }
+ - { name: cm-udp-services, file: cm-udp-services.yml, type: cm }
+ - { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa }
+ - { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole }
+ - { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding }
+ - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role }
+ - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding }
+ - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds }
register: ingress_nginx_manifests
when:
- inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2
similarity index 82%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2
index 7e47e81b1..00c44a97b 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2
@@ -6,5 +6,7 @@ metadata:
namespace: {{ ingress_nginx_namespace }}
labels:
k8s-app: ingress-nginx
+{% if ingress_nginx_configmap %}
data:
{{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
+{%- endif %}
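The new guard skips the `data:` block entirely when `ingress_nginx_configmap` is left at its empty `{}` default, rather than rendering a dangling key. When populated, the dict is dumped as-is, so keys should be valid upstream nginx-ingress ConfigMap options. An illustrative override (values assumed, not from this diff):

```yaml
# Keys come from the upstream nginx-ingress ConfigMap option reference.
ingress_nginx_configmap:
  map-hash-bucket-size: "128"
  ssl-protocols: "TLSv1.2"
```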
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2
similarity index 71%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2
index 0a87e91b7..d97c42d97 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2
@@ -2,9 +2,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
- name: ingress-nginx-tcp-services
+ name: tcp-services
namespace: {{ ingress_nginx_namespace }}
labels:
k8s-app: ingress-nginx
+{% if ingress_nginx_configmap_tcp_services %}
data:
{{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
+{%- endif %}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2
similarity index 71%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2
index d943e5718..b343869b7 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2
@@ -2,9 +2,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
- name: ingress-nginx-udp-services
+ name: udp-services
namespace: {{ ingress_nginx_namespace }}
labels:
k8s-app: ingress-nginx
+{% if ingress_nginx_configmap_udp_services %}
data:
{{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
+{%- endif %}
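Both L4 ConfigMaps get the same treatment, and their shorter names (`tcp-services`, `udp-services`) line up with the controller arguments updated in the DaemonSet hunk below. Values follow the upstream `namespace/service:port` convention; a sketch of an override, with hypothetical target Services:

```yaml
# Expose TCP 9000 and UDP 53 on every node running the controller,
# proxying to in-cluster Services (illustrative targets).
ingress_nginx_configmap_tcp_services:
  "9000": "default/example-go:8080"
ingress_nginx_configmap_udp_services:
  "53": "kube-system/kube-dns:53"
```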
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
similarity index 71%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
index c0bed920b..eca5a5084 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
@@ -1,27 +1,27 @@
---
apiVersion: apps/v1
-kind: ReplicaSet
+kind: Deployment
metadata:
- name: ingress-nginx-default-backend-v{{ ingress_nginx_default_backend_image_tag }}
+ name: default-backend-v{{ ingress_nginx_default_backend_image_tag }}
namespace: {{ ingress_nginx_namespace }}
labels:
- k8s-app: ingress-nginx-default-backend
+ k8s-app: default-backend
version: v{{ ingress_nginx_default_backend_image_tag }}
spec:
replicas: 1
selector:
matchLabels:
- k8s-app: ingress-nginx-default-backend
+ k8s-app: default-backend
version: v{{ ingress_nginx_default_backend_image_tag }}
template:
metadata:
labels:
- k8s-app: ingress-nginx-default-backend
+ k8s-app: default-backend
version: v{{ ingress_nginx_default_backend_image_tag }}
spec:
terminationGracePeriodSeconds: 60
containers:
- - name: ingress-nginx-default-backend
+ - name: default-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
@@ -35,3 +35,10 @@ spec:
timeoutSeconds: 5
ports:
- containerPort: 8080
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
similarity index 80%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
index 52501a4c7..068754642 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
@@ -7,9 +7,6 @@ metadata:
labels:
k8s-app: ingress-nginx
version: v{{ ingress_nginx_controller_image_tag }}
- annotations:
- prometheus.io/port: '10254'
- prometheus.io/scrape: 'true'
spec:
selector:
matchLabels:
@@ -24,23 +21,35 @@ spec:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
+{% if rbac_enabled %}
+ serviceAccountName: ingress-nginx
+{% endif %}
{% if ingress_nginx_host_network %}
hostNetwork: true
{% endif %}
+{% if ingress_nginx_nodeselector %}
nodeSelector:
- node-role.kubernetes.io/ingress: "true"
- terminationGracePeriodSeconds: 60
+ {{ ingress_nginx_nodeselector | to_nice_yaml }}
+{%- endif %}
containers:
- name: ingress-nginx-controller
image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /nginx-ingress-controller
- - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend
+ - --default-backend-service=$(POD_NAMESPACE)/default-backend
- --configmap=$(POD_NAMESPACE)/ingress-nginx
- - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services
- - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services
+ - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
+ - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ # www-data -> 33
+ runAsUser: 33
env:
- name: POD_NAME
valueFrom:
@@ -76,7 +85,3 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
-{% if rbac_enabled %}
- serviceAccountName: ingress-nginx
-{% endif %}
-
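Behavioural notes on this hunk: `serviceAccountName` moves from the tail of the template, where its indentation appears to have placed it outside the pod spec, up into the pod spec proper; the controller now runs unprivileged as UID 33 (`www-data`) with every capability dropped except `NET_BIND_SERVICE`, mirroring upstream 0.16+ manifests so nginx can still bind ports 80 and 443; and the Prometheus scrape annotations are only dropped at the DaemonSet level, remaining on the pod template (the context lines kept in the second hunk), which is where scraping actually happens. With the new default the selector block renders as (illustrative):

```yaml
# Rendered with the default variable. Note that to_nice_yaml emits
# follow-on lines without re-indentation, so a multi-entry selector
# would additionally need Jinja's indent filter; the single-entry
# default renders fine.
nodeSelector:
  node-role.kubernetes.io/master: "true"
```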
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
similarity index 56%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
index ab23f3799..326cc8843 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
@@ -2,13 +2,13 @@
apiVersion: v1
kind: Service
metadata:
- name: ingress-nginx-default-backend
+ name: default-backend
namespace: {{ ingress_nginx_namespace }}
labels:
- k8s-app: ingress-nginx-default-backend
+ k8s-app: default-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
- k8s-app: ingress-nginx-default-backend
+ k8s-app: default-backend
diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml
deleted file mode 100644
index 6124ce42e..000000000
--- a/roles/kubernetes-apps/istio/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-istio_namespace: istio-system
diff --git a/roles/kubernetes-apps/istio/tasks/main.yml b/roles/kubernetes-apps/istio/tasks/main.yml
deleted file mode 100644
index 5e36a56cc..000000000
--- a/roles/kubernetes-apps/istio/tasks/main.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: istio | Create addon dir
- file:
- path: "{{ kube_config_dir }}/addons/istio"
- owner: root
- group: root
- mode: 0755
- recurse: yes
-
-- name: istio | Lay out manifests
- template:
- src: "{{item.file}}.j2"
- dest: "{{kube_config_dir}}/addons/istio/{{item.file}}"
- with_items:
- - {name: istio-mixer, file: istio.yml, type: deployment }
- - {name: istio-initializer, file: istio-initializer.yml, type: deployment }
- register: manifests
- when: inventory_hostname == groups['kube-master'][0]
-
-- name: istio | Copy istioctl binary from download dir
- command: rsync -piu "{{ local_release_dir }}/istio/istioctl" "{{ bin_dir }}/istioctl"
- changed_when: false
-
-- name: istio | Set up bash completion
- shell: "{{ bin_dir }}/istioctl completion >/etc/bash_completion.d/istioctl.sh"
- when: ansible_os_family in ["Debian","RedHat"]
-
-- name: istio | Set bash completion file
- file:
- path: /etc/bash_completion.d/istioctl.sh
- owner: root
- group: root
- mode: 0755
- when: ansible_os_family in ["Debian","RedHat"]
-
-- name: istio | apply manifests
- kube:
- name: "{{item.item.name}}"
- namespace: "{{ istio_namespace }}"
- kubectl: "{{bin_dir}}/kubectl"
- resource: "{{item.item.type}}"
- filename: "{{kube_config_dir}}/addons/istio/{{item.item.file}}"
- state: "latest"
- with_items: "{{ manifests.results }}"
- when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2 b/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2
deleted file mode 100644
index 84f957ed1..000000000
--- a/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-# GENERATED FILE. Use with Kubernetes 1.7+
-# TO UPDATE, modify files in install/kubernetes/templates and run install/updateVersion.sh
-################################
-# Istio initializer
-################################
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio-inject
- namespace: {{ istio_namespace }}
-data:
- config: |-
- policy: "enabled"
- namespaces: [""] # everything, aka v1.NamepsaceAll, aka cluster-wide
- initializerName: "sidecar.initializer.istio.io"
- params:
- initImage: {{ istio_proxy_init_image_repo }}:{{ istio_proxy_init_image_tag }}
- proxyImage: {{ istio_proxy_image_repo }}:{{ istio_proxy_image_tag }}
- verbosity: 2
- version: 0.2.6
- meshConfigMapName: istio
- imagePullPolicy: IfNotPresent
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-initializer-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
- name: istio-initializer
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
- initializers:
- pending: []
- labels:
- istio: istio-initializer
-spec:
- replicas: 1
- template:
- metadata:
- name: istio-initializer
- labels:
- istio: initializer
- annotations:
- sidecar.istio.io/inject: "false"
- spec:
- serviceAccountName: istio-initializer-service-account
- containers:
- - name: initializer
- image: {{ istio_sidecar_initializer_image_repo }}:{{ istio_sidecar_initializer_image_tag }}
- imagePullPolicy: IfNotPresent
- args:
- - --port=8083
- - --namespace={{ istio_namespace }}
- - -v=2
- volumeMounts:
- - name: config-volume
- mountPath: /etc/istio/config
- volumes:
- - name: config-volume
- configMap:
- name: istio
----
-apiVersion: admissionregistration.k8s.io/v1alpha1
-kind: InitializerConfiguration
-metadata:
- name: istio-sidecar
-initializers:
- - name: sidecar.initializer.istio.io
- rules:
- - apiGroups:
- - "*"
- apiVersions:
- - "*"
- resources:
- - deployments
- - statefulsets
- - jobs
- - daemonsets
----
diff --git a/roles/kubernetes-apps/istio/templates/istio.yml.j2 b/roles/kubernetes-apps/istio/templates/istio.yml.j2
deleted file mode 100644
index bd0b93a7f..000000000
--- a/roles/kubernetes-apps/istio/templates/istio.yml.j2
+++ /dev/null
@@ -1,1285 +0,0 @@
-# GENERATED FILE. Use with Kubernetes 1.7+
-# TO UPDATE, modify files in install/kubernetes/templates and run install/updateVersion.sh
-################################
-# Istio system namespace
-################################
-apiVersion: v1
-kind: Namespace
-metadata:
- name: {{ istio_namespace }}
----
-################################
-# Istio RBAC
-################################
-# Permissions and roles for istio
-# To debug: start the cluster with -vmodule=rbac,3 to enable verbose logging on RBAC DENY
-# Also helps to enable logging on apiserver 'wrap' to see the URLs.
-# Each RBAC deny needs to be mapped into a rule for the role.
-# If using minikube, start with '--extra-config=apiserver.Authorization.Mode=RBAC'
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-pilot-istio-system
-rules:
-- apiGroups: ["config.istio.io"]
- resources: ["*"]
- verbs: ["*"]
-- apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["*"]
-- apiGroups: ["istio.io"]
- resources: ["istioconfigs", "istioconfigs.istio.io"]
- verbs: ["*"]
-- apiGroups: ["extensions"]
- resources: ["thirdpartyresources", "thirdpartyresources.extensions", "ingresses", "ingresses/status"]
- verbs: ["*"]
-- apiGroups: [""]
- resources: ["configmaps", "endpoints", "pods", "services"]
- verbs: ["*"]
-- apiGroups: [""]
- resources: ["namespaces", "nodes", "secrets"]
- verbs: ["get", "list", "watch"]
-- apiGroups: ["admissionregistration.k8s.io"]
- resources: ["externaladmissionhookconfigurations"]
- verbs: ["create", "update", "delete"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-initializer-istio-system
-rules:
-- apiGroups: ["*"]
- resources: ["deployments", "statefulsets", "jobs", "cronjobs", "daemonsets", "replicasets", "replicationcontrollers"]
- verbs: ["initialize", "patch", "watch", "list"]
-- apiGroups: ["*"]
- resources: ["configmaps"]
- verbs: ["get", "list", "watch"]
----
-# Mixer CRD needs to watch and list CRDs
-# It also uses discovery API to discover Kinds of config.istio.io
-# K8s adapter needs to list pods, services etc.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-mixer-istio-system
-rules:
-- apiGroups: ["config.istio.io"] # Istio CRD watcher
- resources: ["*"]
- verbs: ["get", "list", "watch"]
-- apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["get", "list", "watch"]
-- apiGroups: [""]
- resources: ["configmaps", "endpoints", "pods", "services", "namespaces", "secrets"]
- verbs: ["get", "list", "watch"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ca-istio-system
-rules:
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["create", "get", "watch", "list", "update"]
-- apiGroups: [""]
- resources: ["serviceaccounts"]
- verbs: ["get", "watch", "list"]
----
-# Permissions for the sidecar proxy.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-sidecar-istio-system
-rules:
-- apiGroups: ["istio.io"]
- resources: ["istioconfigs"]
- verbs: ["get", "watch", "list"]
-- apiGroups: ["extensions"]
- resources: ["thirdpartyresources", "ingresses"]
- verbs: ["get", "watch", "list", "update"]
-- apiGroups: [""]
- resources: ["configmaps", "pods", "endpoints", "services"]
- verbs: ["get", "watch", "list"]
----
-# Grant permissions to the Pilot/discovery.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-pilot-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-pilot-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Sidecar initializer
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-initializer-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-initializer-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-initializer-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the CA.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ca-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-ca-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-ca-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Ingress controller.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ingress-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-ingress-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Egress controller.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-egress-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-egress-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the sidecar.
-# TEMPORARY: the istioctl should generate a separate service account for the proxy, and permission
-# granted only to that account !
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-sidecar-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: default
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-sidecar-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to Mixer.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-mixer-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-mixer-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-mixer-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Mixer
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
-data:
- mapping.conf: |-
----
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
- labels:
- istio: mixer
-spec:
- ports:
- - name: tcp
- port: 9091
- - name: http-health
- port: 9093
- - name: configapi
- port: 9094
- - name: statsd-prom
- port: 9102
- - name: statsd-udp
- port: 9125
- protocol: UDP
- - name: prometheus
- port: 42422
- selector:
- istio: mixer
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-mixer-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: mixer
- spec:
- serviceAccountName: istio-mixer-service-account
- containers:
- - name: statsd-to-prometheus
- image: {{ istio_statsd_image_repo }}:{{ istio_statsd_image_tag }}
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 9102
- - containerPort: 9125
- protocol: UDP
- args:
- - '-statsd.mapping-config=/etc/statsd/mapping.conf'
- volumeMounts:
- - name: config-volume
- mountPath: /etc/statsd
- - name: mixer
- image: {{ istio_mixer_image_repo }}:{{ istio_mixer_image_tag }}
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 9091
- - containerPort: 9094
- - containerPort: 42422
- args:
- - --configStoreURL=fs:///etc/opt/mixer/configroot
- - --configStore2URL=k8s://
- - --configDefaultNamespace=istio-system
- - --traceOutput=http://zipkin:9411/api/v1/spans
- - --logtostderr
- - -v
- - "2"
- volumes:
- - name: config-volume
- configMap:
- name: istio-mixer
----
-# Mixer CRD definitions are generated using
-# mixs crd all
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: rules.config.istio.io
- labels:
- package: istio.io.mixer
- istio: core
-spec:
- group: config.istio.io
- names:
- kind: rule
- plural: rules
- singular: rule
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: attributemanifests.config.istio.io
- labels:
- package: istio.io.mixer
- istio: core
-spec:
- group: config.istio.io
- names:
- kind: attributemanifest
- plural: attributemanifests
- singular: attributemanifest
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: deniers.config.istio.io
- labels:
- package: denier
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: denier
- plural: deniers
- singular: denier
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: listcheckers.config.istio.io
- labels:
- package: listchecker
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: listchecker
- plural: listcheckers
- singular: listchecker
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: memquotas.config.istio.io
- labels:
- package: memquota
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: memquota
- plural: memquotas
- singular: memquota
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: noops.config.istio.io
- labels:
- package: noop
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: noop
- plural: noops
- singular: noop
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: prometheuses.config.istio.io
- labels:
- package: prometheus
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: prometheus
- plural: prometheuses
- singular: prometheus
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: stackdrivers.config.istio.io
- labels:
- package: stackdriver
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: stackdriver
- plural: stackdrivers
- singular: stackdriver
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: statsds.config.istio.io
- labels:
- package: statsd
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: statsd
- plural: statsds
- singular: statsd
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: stdios.config.istio.io
- labels:
- package: stdio
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: stdio
- plural: stdios
- singular: stdio
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: svcctrls.config.istio.io
- labels:
- package: svcctrl
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: svcctrl
- plural: svcctrls
- singular: svcctrl
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: checknothings.config.istio.io
- labels:
- package: checknothing
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: checknothing
- plural: checknothings
- singular: checknothing
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: listentries.config.istio.io
- labels:
- package: listentry
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: listentry
- plural: listentries
- singular: listentry
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: logentries.config.istio.io
- labels:
- package: logentry
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: logentry
- plural: logentries
- singular: logentry
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: metrics.config.istio.io
- labels:
- package: metric
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: metric
- plural: metrics
- singular: metric
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: quotas.config.istio.io
- labels:
- package: quota
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: quota
- plural: quotas
- singular: quota
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: reportnothings.config.istio.io
- labels:
- package: reportnothing
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: reportnothing
- plural: reportnothings
- singular: reportnothing
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: attributemanifest
-metadata:
- name: istioproxy
- namespace: {{ istio_namespace }}
-spec:
- attributes:
- origin.ip:
- valueType: IP_ADDRESS
- origin.uid:
- valueType: STRING
- origin.user:
- valueType: STRING
- request.headers:
- valueType: STRING_MAP
- request.id:
- valueType: STRING
- request.host:
- valueType: STRING
- request.method:
- valueType: STRING
- request.path:
- valueType: STRING
- request.reason:
- valueType: STRING
- request.referer:
- valueType: STRING
- request.scheme:
- valueType: STRING
- request.size:
- valueType: INT64
- request.time:
- valueType: TIMESTAMP
- request.useragent:
- valueType: STRING
- response.code:
- valueType: INT64
- response.duration:
- valueType: DURATION
- response.headers:
- valueType: STRING_MAP
- response.size:
- valueType: INT64
- response.time:
- valueType: TIMESTAMP
- source.uid:
- valueType: STRING
- source.user:
- valueType: STRING
- destination.uid:
- valueType: STRING
- connection.id:
- valueType: STRING
- connection.received.bytes:
- valueType: INT64
- connection.received.bytes_total:
- valueType: INT64
- connection.sent.bytes:
- valueType: INT64
- connection.sent.bytes_total:
- valueType: INT64
- connection.duration:
- valueType: DURATION
- context.protocol:
- valueType: STRING
- context.timestamp:
- valueType: TIMESTAMP
- context.time:
- valueType: TIMESTAMP
-
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: attributemanifest
-metadata:
- name: kubernetes
- namespace: {{ istio_namespace }}
-spec:
- attributes:
- source.ip:
- valueType: IP_ADDRESS
- source.labels:
- valueType: STRING_MAP
- source.name:
- valueType: STRING
- source.namespace:
- valueType: STRING
- source.service:
- valueType: STRING
- source.serviceAccount:
- valueType: STRING
- destination.ip:
- valueType: IP_ADDRESS
- destination.labels:
- valueType: STRING_MAP
- destination.name:
- valueType: STRING
- destination.namespace:
- valueType: STRING
- destination.service:
- valueType: STRING
- destination.serviceAccount:
- valueType: STRING
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: stdio
-metadata:
- name: handler
- namespace: {{ istio_namespace }}
-spec:
- outputAsJson: true
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: logentry
-metadata:
- name: accesslog
- namespace: {{ istio_namespace }}
-spec:
- severity: '"Default"'
- timestamp: request.time
- variables:
- sourceIp: source.ip | ip("0.0.0.0")
- destinationIp: destination.ip | ip("0.0.0.0")
- sourceUser: source.user | ""
- method: request.method | ""
- url: request.path | ""
- protocol: request.scheme | "http"
- responseCode: response.code | 0
- responseSize: response.size | 0
- requestSize: request.size | 0
- latency: response.duration | "0ms"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: stdio
- namespace: {{ istio_namespace }}
-spec:
- match: "true" # If omitted match is true.
- actions:
- - handler: handler.stdio
- instances:
- - accesslog.logentry
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestcount
- namespace: {{ istio_namespace }}
-spec:
- value: "1"
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestduration
- namespace: {{ istio_namespace }}
-spec:
- value: response.duration | "0ms"
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestsize
- namespace: {{ istio_namespace }}
-spec:
- value: request.size | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: responsesize
- namespace: {{ istio_namespace }}
-spec:
- value: response.size | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: tcpbytesent
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only generate when context.protocol == tcp
-spec:
- value: connection.sent.bytes | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: tcpbytereceived
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only generate when context.protocol == tcp
-spec:
- value: connection.received.bytes | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: prometheus
-metadata:
- name: handler
- namespace: {{ istio_namespace }}
-spec:
- metrics:
- - name: request_count
- instance_name: requestcount.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- - name: request_duration
- instance_name: requestduration.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- explicit_buckets:
- bounds: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
- - name: request_size
- instance_name: requestsize.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- exponentialBuckets:
- numFiniteBuckets: 8
- scale: 1
- growthFactor: 10
- - name: response_size
- instance_name: responsesize.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- exponentialBuckets:
- numFiniteBuckets: 8
- scale: 1
- growthFactor: 10
- - name: tcp_bytes_sent
- instance_name: tcpbytesent.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - name: tcp_bytes_received
- instance_name: tcpbytereceived.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: promhttp
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: http
-spec:
- actions:
- - handler: handler.prometheus
- instances:
- - requestcount.metric
- - requestduration.metric
- - requestsize.metric
- - responsesize.metric
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: promtcp
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only execute when context.protocol == TCP
-spec:
- actions:
- - handler: handler.prometheus
- instances:
- - tcpbytesent.metric
- - tcpbytereceived.metric
----
-################################
-# Istio configMap cluster-wide
-################################
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio
- namespace: {{ istio_namespace }}
-data:
- mesh: |-
- # Uncomment the following line to enable mutual TLS between proxies
- # authPolicy: MUTUAL_TLS
- #
- # Set the following variable to true to disable policy checks by the Mixer.
- # Note that metrics will still be reported to the Mixer.
- disablePolicyChecks: false
- # Set enableTracing to false to disable request tracing.
- enableTracing: true
- #
- # To disable the mixer completely (including metrics), comment out
- # the following line
- mixerAddress: istio-mixer.istio-system:9091
- # This is the ingress service name, update if you used a different name
- ingressService: istio-ingress
- egressProxyAddress: istio-egress.istio-system:80
- #
- # Along with discoveryRefreshDelay, this setting determines how
- # frequently should Envoy fetch and update its internal configuration
- # from Istio Pilot. Lower refresh delay results in higher CPU
- # utilization and potential performance loss in exchange for faster
- # convergence. Tweak this value according to your setup.
- rdsRefreshDelay: 1s
- #
- defaultConfig:
- # See rdsRefreshDelay for explanation about this setting.
- discoveryRefreshDelay: 1s
- #
- # TCP connection timeout between Envoy & the application, and between Envoys.
- connectTimeout: 10s
- #
- ### ADVANCED SETTINGS #############
- # Where should envoy's configuration be stored in the istio-proxy container
- configPath: "/etc/istio/proxy"
- binaryPath: "/usr/local/bin/envoy"
- # The pseudo service name used for Envoy.
- serviceCluster: istio-proxy
- # These settings that determine how long an old Envoy
- # process should be kept alive after an occasional reload.
- drainDuration: 45s
- parentShutdownDuration: 1m0s
- #
- # Port where Envoy listens (on local host) for admin commands
- # You can exec into the istio-proxy container in a pod and
- # curl the admin port (curl http://localhost:15000/) to obtain
- # diagnostic information from Envoy. See
- # https://lyft.github.io/envoy/docs/operations/admin.html
- # for more details
- proxyAdminPort: 15000
- #
- # Address where Istio Pilot service is running
- discoveryAddress: istio-pilot.istio-system:8080
- #
- # Zipkin trace collector
- zipkinAddress: zipkin.istio-system:9411
- #
- # Statsd metrics collector. Istio mixer exposes a UDP endpoint
- # to collect and convert statsd metrics into Prometheus metrics.
- statsdUdpAddress: istio-mixer.istio-system:9125
----
-################################
-# Pilot
-################################
-# Pilot CRDs
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: destinationpolicies.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: DestinationPolicy
- listKind: DestinationPolicyList
- plural: destinationpolicies
- singular: destinationpolicy
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: egressrules.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: EgressRule
- listKind: EgressRuleList
- plural: egressrules
- singular: egressrule
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: routerules.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: RouteRule
- listKind: RouteRuleList
- plural: routerules
- singular: routerule
- scope: Namespaced
- version: v1alpha2
----
-# Pilot service for discovery
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-pilot
- namespace: {{ istio_namespace }}
- labels:
- istio: pilot
-spec:
- ports:
- - port: 8080
- name: http-discovery
- - port: 443
- name: http-admission-webhook
- selector:
- istio: pilot
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-pilot-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-pilot
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: pilot
- spec:
- serviceAccountName: istio-pilot-service-account
- containers:
- - name: discovery
- image: {{ istio_pilot_image_repo }}:{{ istio_pilot_image_tag }}
- imagePullPolicy: IfNotPresent
- args: ["discovery", "-v", "2", "--admission-service", "istio-pilot-external"]
- ports:
- - containerPort: 8080
- - containerPort: 443
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: config-volume
- mountPath: /etc/istio/config
- volumes:
- - name: config-volume
- configMap:
- name: istio
----
-################################
-# Istio ingress
-################################
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-ingress
- namespace: {{ istio_namespace }}
- labels:
- istio: ingress
-spec:
- type: LoadBalancer
- ports:
- - port: 80
-# nodePort: 32000
- name: http
- - port: 443
- name: https
- selector:
- istio: ingress
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-ingress-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-ingress
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: ingress
- spec:
- serviceAccountName: istio-ingress-service-account
- containers:
- - name: istio-ingress
- image: {{ istio_proxy_debug_image_repo }}:{{ istio_proxy_debug_image_tag }}
- args:
- - proxy
- - ingress
- - -v
- - "2"
- - --discoveryAddress
- - istio-pilot:8080
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 80
- - containerPort: 443
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: istio-certs
- mountPath: /etc/certs
- readOnly: true
- - name: ingress-certs
- mountPath: /etc/istio/ingress-certs
- readOnly: true
- volumes:
- - name: istio-certs
- secret:
- secretName: istio.default
- optional: true
- - name: ingress-certs
- secret:
- secretName: istio-ingress-certs
- optional: true
----
-################################
-# Istio egress
-################################
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-egress
- namespace: {{ istio_namespace }}
-spec:
- ports:
- - port: 80
- selector:
- istio: egress
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-egress-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-egress
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: egress
- spec:
- serviceAccountName: istio-egress-service-account
- containers:
- - name: proxy
- image: {{ istio_proxy_debug_image_repo }}:{{ istio_proxy_debug_image_tag }}
- imagePullPolicy: IfNotPresent
- args:
- - proxy
- - egress
- - -v
- - "2"
- - --discoveryAddress
- - istio-pilot:8080
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: istio-certs
- mountPath: /etc/certs
- readOnly: true
- volumes:
- - name: istio-certs
- secret:
- secretName: istio.default
- optional: true
----
-################################
-# Istio-CA cluster-wide
-################################
-# Service account CA
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-ca-service-account
- namespace: {{ istio_namespace }}
----
-# Istio CA watching all namespaces
-apiVersion: v1
-kind: Deployment
-apiVersion: extensions/v1beta1
-metadata:
- name: istio-ca
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: istio-ca
- spec:
- serviceAccountName: istio-ca-service-account
- containers:
- - name: istio-ca
- image: {{ istio_ca_image_repo }}:{{ istio_ca_image_tag }}
- imagePullPolicy: IfNotPresent
----
-
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index acd6f7495..2ee491f06 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -22,14 +22,6 @@ dependencies:
- apps
- registry
- # istio role should be last because it takes a long time to initialize and
- # will cause timeouts trying to start other addons.
- - role: kubernetes-apps/istio
- when: istio_enabled
- tags:
- - apps
- - istio
-
- role: kubernetes-apps/persistent_volumes
when: persistent_volumes_enabled
tags:
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index 53ad953b5..44babf343 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -1,4 +1,5 @@
---
+
- name: Weave | Start Resources
kube:
name: "weave-net"
@@ -9,13 +10,12 @@
state: "latest"
when: inventory_hostname == groups['kube-master'][0]
-- name: "Weave | wait for weave to become available"
+- name: Weave | Wait for Weave to become available
uri:
url: http://127.0.0.1:6784/status
return_content: yes
register: weave_status
retries: 180
delay: 5
- until: "{{ weave_status.status == 200 and
- 'Status: ready' in weave_status.content }}"
+  until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
index 8553ec5e2..05a3d944e 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
@@ -1,2 +1,7 @@
---
persistent_volumes_enabled: false
+storage_classes:
+ - name: standard
+ is_default: true
+ parameters:
+ availability: nova
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index e4d1b138c..80d5fdd29 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -1,21 +1,19 @@
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
template:
- src: "{{item.file}}"
- dest: "{{kube_config_dir}}/{{item.file}}"
- with_items:
- - {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
+ src: "openstack-storage-class.yml.j2"
+ dest: "{{kube_config_dir}}/openstack-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
- name: "{{item.item.name}}"
+ name: storage-class
kubectl: "{{bin_dir}}/kubectl"
- resource: "{{item.item.type}}"
- filename: "{{kube_config_dir}}/{{item.item.file}}"
+ resource: StorageClass
+ filename: "{{kube_config_dir}}/openstack-storage-class.yml"
state: "latest"
- with_items: "{{ manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
+ - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
deleted file mode 100644
index 02d39dd97..000000000
--- a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
-provisioner: kubernetes.io/cinder
-parameters:
- availability: nova
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
new file mode 100644
index 000000000..629c1f0a3
--- /dev/null
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
@@ -0,0 +1,14 @@
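+{# Renders one StorageClass per entry in storage_classes (see role defaults);
+   is_default marks the cluster default class and parameters are passed
+   verbatim to the Cinder provisioner. #}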
+{% for class in storage_classes %}
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: "{{ class.name }}"
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
+provisioner: kubernetes.io/cinder
+parameters:
+{% for key, value in (class.parameters | default({})).items() %}
+ "{{ key }}": "{{ value }}"
+{% endfor %}
+{% endfor %}
diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md
index 81615631e..c320f2bd4 100644
--- a/roles/kubernetes-apps/registry/README.md
+++ b/roles/kubernetes-apps/registry/README.md
@@ -110,18 +110,18 @@ metadata:
name: kube-registry-v0
namespace: kube-system
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
template:
metadata:
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
@@ -164,12 +164,12 @@ metadata:
name: kube-registry
namespace: kube-system
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
ports:
- name: registry
port: 5000
@@ -257,7 +257,7 @@ You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
``` console
-$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
+$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 67b2da325..4da9ad30a 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -5,7 +5,7 @@
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
{%- else -%}
- https://{{ kube_apiserver_address }}:{{ kube_apiserver_port }}
+ https://{{ kube_apiserver_access_address }}:{{ kube_apiserver_port }}
{%- endif -%}
tags:
- facts
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 2b6e739db..26fa22d8a 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -2,12 +2,11 @@
- name: Set kubeadm_discovery_address
set_fact:
kubeadm_discovery_address: >-
- {%- if "127.0.0.1" or "localhost" in kube_apiserver_endpoint -%}
+ {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint }}
{%- endif %}
- when: not is_kube_master
tags:
- facts
@@ -28,23 +27,36 @@
register: temp_token
delegate_to: "{{ groups['kube-master'][0] }}"
+- name: Get the kubeadm version
+ command: "{{ bin_dir }}/kubeadm version -o short"
+ register: kubeadm_output
+
+- name: Set kubeadm API version to v1alpha1
+ set_fact:
+ kubeadmConfig_api_version: v1alpha1
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+
+- name: Default kubeadm API version to v1alpha2
+ set_fact:
+ kubeadmConfig_api_version: v1alpha2
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')
+
- name: Create kubeadm client config
template:
- src: kubeadm-client.conf.j2
- dest: "{{ kube_config_dir }}/kubeadm-client.conf"
+ src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
+ dest: "{{ kube_config_dir }}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf"
backup: yes
when: not is_kube_master
vars:
kubeadm_token: "{{ temp_token.stdout }}"
- register: kubeadm_client_conf
- name: Join to cluster if needed
command: >-
{{ bin_dir }}/kubeadm join
- --config {{ kube_config_dir}}/kubeadm-client.conf
+      --config {{ kube_config_dir }}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf
--ignore-preflight-errors=all
register: kubeadm_join
- when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists)
+ when: not is_kube_master and (not kubelet_conf.stat.exists)
- name: Wait for kubelet bootstrap to create config
wait_for:
@@ -53,18 +65,33 @@
timeout: 60
- name: Update server field in kubelet kubeconfig
- replace:
- path: "{{ kube_config_dir }}/kubelet.conf"
- regexp: '(\s+)https://{{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
- replace: '\1{{ kube_apiserver_endpoint }}\2'
+ lineinfile:
+ dest: "{{ kube_config_dir }}/kubelet.conf"
+ regexp: 'server:'
+ line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
notify: restart kubelet
+- name: Update server field in kube-proxy kubeconfig
+ shell: >-
+ {{ bin_dir }}/kubectl get configmap kube-proxy -n kube-system -o yaml
+ | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
+ | {{ bin_dir }}/kubectl replace -f -
+ delegate_to: "{{groups['kube-master']|first}}"
+ run_once: true
+ when: is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
+
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+ shell: "{{ bin_dir }}/kubectl delete pod -n kube-system -l k8s-app=kube-proxy"
+ delegate_to: "{{groups['kube-master']|first}}"
+ run_once: true
+ when: is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
+
# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes
- name: Symlink kubelet kubeconfig for calico/canal
file:
- src: "{{ kube_config_dir }}//kubelet.conf"
+ src: "{{ kube_config_dir }}/kubelet.conf"
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
state: link
force: yes
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
similarity index 100%
rename from roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
rename to roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
new file mode 100644
index 000000000..e2cd04a86
--- /dev/null
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
@@ -0,0 +1,13 @@
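+# NodeConfiguration for kubeadm >= v1.11; the same bootstrap token is used
+# for discovery, TLS bootstrap and joining.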
+apiVersion: kubeadm.k8s.io/v1alpha2
+kind: NodeConfiguration
+clusterName: {{ cluster_name }}
+discoveryFile: ""
+caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+discoveryToken: {{ kubeadm_token }}
+tlsBootstrapToken: {{ kubeadm_token }}
+token: {{ kubeadm_token }}
+discoveryTokenAPIServers:
+- {{ kubeadm_discovery_address | replace("https://", "")}}
+discoveryTokenUnsafeSkipCAVerification: true
+nodeRegistration:
+ name: {{ inventory_hostname }}
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index a050be1da..68a09cef0 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -24,6 +24,29 @@ kube_apiserver_storage_backend: etcd3
# By default, force back to etcd2. Set to true to force etcd3 (experimental!)
force_etcd3: false
+# audit support
+kubernetes_audit: false
+audit_log_path: /var/log/audit/kube-apiserver-audit.log
+# number of days to retain audit logs
+audit_log_maxage: 30
+# number of rotated audit log files to retain
+audit_log_maxbackups: 1
+# maximum size in MB of an audit log file before rotation
+audit_log_maxsize: 100
+# policy file
+audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
+
+# audit log hostpath
+audit_log_name: audit-logs
+audit_log_hostpath: /var/log/kubernetes/audit
+audit_log_mountpath: /var/log/audit
+audit_log_writable: true
+
+# audit policy hostpath
+audit_policy_name: audit-policy
+audit_policy_hostpath: /etc/kubernetes/audit-policy
+audit_policy_mountpath: "{{ audit_policy_hostpath }}"
+
# Limits for kube components
kube_controller_memory_limit: 512M
kube_controller_cpu_limit: 250m
@@ -41,7 +64,7 @@ kube_apiserver_cpu_limit: 800m
kube_apiserver_memory_requests: 256M
kube_apiserver_cpu_requests: 100m
-# Admission control plug-ins
+# Admission control plug-ins (Kubernetes 1.9 and below)
kube_apiserver_admission_control:
- Initializers
- NamespaceLifecycle
@@ -56,6 +79,12 @@ kube_apiserver_admission_control:
{%- endif -%}
- ResourceQuota
+# 1.10+ admission plugins
+kube_apiserver_enable_admission_plugins: []
+
+# 1.10+ list of disabled admission plugins
+kube_apiserver_disable_admission_plugins: []
+
# extra runtime config
kube_api_runtime_config:
- admissionregistration.k8s.io/v1alpha1
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index b841d8357..2ba7485a1 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -65,14 +65,38 @@
command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
changed_when: false
+- name: Create audit-policy directory
+ file: path={{ kube_config_dir }}/audit-policy state=directory
+ when: kubernetes_audit|default(false)
+
+- name: Write API audit policy yaml
+ template:
+ src: apiserver-audit-policy.yaml.j2
+ dest: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
+ when: kubernetes_audit|default(false)
+
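+# kubeadm v1.11 moved its config API from v1alpha1 to v1alpha2; detect the
+# installed binary and render the matching MasterConfiguration template.
+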
+- name: Get the kubeadm version
+ command: "{{ bin_dir }}/kubeadm version -o short"
+ register: kubeadm_output
+
+- name: Set kubeadm API version to v1alpha1
+ set_fact:
+ kubeadmConfig_api_version: v1alpha1
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+
+- name: Default kubeadm API version to v1alpha2
+ set_fact:
+ kubeadmConfig_api_version: v1alpha2
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')
+
- name: kubeadm | Create kubeadm config
template:
- src: kubeadm-config.yaml.j2
- dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
+ src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
+ dest: "{{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
register: kubeadm_config
- name: kubeadm | Initialize first master
- command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+ command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
register: kubeadm_init
# Retry is because upload config sometimes fails
retries: 3
@@ -85,7 +109,7 @@
timeout -k 240s 240s
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
- --config={{ kube_config_dir }}/kubeadm-config.yaml
+ --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
--ignore-preflight-errors=all
--allow-experimental-upgrades
--allow-release-candidate-upgrades
@@ -98,7 +122,7 @@
# FIXME(mattymo): remove when https://github.com/kubernetes/kubeadm/issues/433 is fixed
- name: kubeadm | Enable kube-proxy
- command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.yaml"
+ command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
when: inventory_hostname == groups['kube-master']|first
changed_when: false
@@ -135,7 +159,7 @@
when: inventory_hostname != groups['kube-master']|first
- name: kubeadm | Init other uninitialized masters
- command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+ command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
register: kubeadm_init
when: inventory_hostname != groups['kube-master']|first and not kubeadm_ca.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
@@ -146,7 +170,7 @@
timeout -k 240s 240s
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
- --config={{ kube_config_dir }}/kubeadm-config.yaml
+ --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
--ignore-preflight-errors=all
--allow-experimental-upgrades
--allow-release-candidate-upgrades
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index daa10fd79..66bf261e5 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -3,12 +3,6 @@
tags:
- k8s-pre-upgrade
-# upstream bug: https://github.com/kubernetes/kubeadm/issues/441
-- name: Disable kube_basic_auth until kubeadm/441 is fixed
- set_fact:
- kube_basic_auth: false
- when: kubeadm_enabled|bool|default(false)
-
- import_tasks: users-file.yml
when: kube_basic_auth|default(true)
@@ -29,7 +23,7 @@
- upgrade
- name: Copy kubectl from hyperkube container
- command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp /hyperkube /systembindir/kubectl"
+ command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -f /hyperkube /systembindir/kubectl"
when: kubectl_task_compare_result.rc != 0
register: kubectl_task_result
until: kubectl_task_result.rc == 0
diff --git a/roles/kubernetes/master/tasks/static-pod-setup.yml b/roles/kubernetes/master/tasks/static-pod-setup.yml
index ca00ca33c..b1fbdc095 100644
--- a/roles/kubernetes/master/tasks/static-pod-setup.yml
+++ b/roles/kubernetes/master/tasks/static-pod-setup.yml
@@ -1,4 +1,19 @@
---
+- name: Create audit-policy directory
+ file: path={{ kube_config_dir }}/audit-policy state=directory
+ tags:
+ - kube-apiserver
+ when: kubernetes_audit|default(false)
+
+- name: Write API audit policy yaml
+ template:
+ src: apiserver-audit-policy.yaml.j2
+ dest: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
+ notify: Master | Restart apiserver
+ tags:
+ - kube-apiserver
+ when: kubernetes_audit|default(false)
+
- name: Write kube-apiserver manifest
template:
src: manifests/kube-apiserver.manifest.j2
diff --git a/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2 b/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2
new file mode 100644
index 000000000..40d6a8bb5
--- /dev/null
+++ b/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2
@@ -0,0 +1,125 @@
+apiVersion: audit.k8s.io/v1beta1
+kind: Policy
+rules:
+ # The following requests were manually identified as high-volume and low-risk,
+ # so drop them.
+ - level: None
+ users: ["system:kube-proxy"]
+ verbs: ["watch"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints", "services", "services/status"]
+ - level: None
+ # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
+ # TODO(#46983): Change this to the ingress controller service account.
+ users: ["system:unsecured"]
+ namespaces: ["kube-system"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["configmaps"]
+ - level: None
+ users: ["kubelet"] # legacy kubelet identity
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ userGroups: ["system:nodes"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ users:
+ - system:kube-controller-manager
+ - system:kube-scheduler
+ - system:serviceaccount:kube-system:endpoint-controller
+ verbs: ["get", "update"]
+ namespaces: ["kube-system"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints"]
+ - level: None
+ users: ["system:apiserver"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+ # Don't log HPA fetching metrics.
+ - level: None
+ users:
+ - system:kube-controller-manager
+ verbs: ["get", "list"]
+ resources:
+ - group: "metrics.k8s.io"
+ # Don't log these read-only URLs.
+ - level: None
+ nonResourceURLs:
+ - /healthz*
+ - /version
+ - /swagger*
+ # Don't log events requests.
+ - level: None
+ resources:
+ - group: "" # core
+ resources: ["events"]
+ # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+ # so only log at the Metadata level.
+ - level: Metadata
+ resources:
+ - group: "" # core
+ resources: ["secrets", "configmaps"]
+ - group: authentication.k8s.io
+ resources: ["tokenreviews"]
+ omitStages:
+ - "RequestReceived"
+ # Get responses can be large; skip them.
+ - level: Request
+ verbs: ["get", "list", "watch"]
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for known APIs
+ - level: RequestResponse
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for all other requests.
+ - level: Metadata
+ omitStages:
+ - "RequestReceived"
diff --git a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
index 6616adc6f..5a13d7a1e 100644
--- a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
+++ b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
@@ -2,17 +2,26 @@
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
- {"name" : "PodFitsHostPorts"},
- {"name" : "PodFitsResources"},
+ {"name" : "MaxEBSVolumeCount"},
+ {"name" : "MaxGCEPDVolumeCount"},
+ {"name" : "MaxAzureDiskVolumeCount"},
+ {"name" : "MatchInterPodAffinity"},
{"name" : "NoDiskConflict"},
- {"name" : "MatchNodeSelector"},
- {"name" : "HostName"}
+ {"name" : "GeneralPredicates"},
+ {"name" : "CheckNodeMemoryPressure"},
+ {"name" : "CheckNodeDiskPressure"},
+ {"name" : "CheckNodeCondition"},
+ {"name" : "PodToleratesNodeTaints"},
+ {"name" : "CheckVolumeBinding"}
],
"priorities" : [
+ {"name" : "SelectorSpreadPriority", "weight" : 1},
+ {"name" : "InterPodAffinityPriority", "weight" : 1},
{"name" : "LeastRequestedPriority", "weight" : 1},
{"name" : "BalancedResourceAllocation", "weight" : 1},
- {"name" : "ServiceSpreadingPriority", "weight" : 1},
- {"name" : "EqualPriority", "weight" : 1}
+ {"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
+ {"name" : "NodeAffinityPriority", "weight" : 1},
+ {"name" : "TaintTolerationPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 10
}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
similarity index 83%
rename from roles/kubernetes/master/templates/kubeadm-config.yaml.j2
rename to roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
index 475d2d0ae..401892ca9 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
@@ -34,12 +34,21 @@ apiServerExtraArgs:
bind-address: {{ kube_apiserver_bind_address }}
insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
insecure-port: "{{ kube_apiserver_insecure_port }}"
+{% if kube_version | version_compare('v1.10', '<') %}
admission-control: {{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
apiserver-count: "{{ kube_apiserver_count }}"
{% if kube_version | version_compare('v1.9', '>=') %}
endpoint-reconciler-type: lease
{% endif %}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
@@ -75,6 +84,12 @@ controllerManagerExtraArgs:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+controllerManagerExtraVolumes:
+- name: openstackcacert
+ hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+ mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
new file mode 100644
index 000000000..29aac6f87
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -0,0 +1,131 @@
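+# MasterConfiguration for kubeadm >= v1.11, selected in kubeadm-setup.yml when
+# `kubeadm version -o short` reports v1.11.0 or newer.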
+apiVersion: kubeadm.k8s.io/v1alpha2
+kind: MasterConfiguration
+api:
+ advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
+ bindPort: {{ kube_apiserver_port }}
+etcd:
+ external:
+ endpoints:
+{% for endpoint in etcd_access_addresses.split(',') %}
+ - {{ endpoint }}
+{% endfor %}
+ caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
+ certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
+ keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+{% if kubernetes_audit %}
+auditPolicy:
+  logDir: {{ audit_log_path | dirname }}
+ logMaxAge: {{ audit_log_maxage }}
+ path: {{ audit_policy_file }}
+{% endif %}
+networking:
+ dnsDomain: {{ dns_domain }}
+ serviceSubnet: {{ kube_service_addresses }}
+ podSubnet: {{ kube_pods_subnet }}
+kubernetesVersion: {{ kube_version }}
+{% if cloud_provider is defined and cloud_provider != "gce" %}
+cloudProvider: {{ cloud_provider }}
+{% endif %}
+kubeProxy:
+ config:
+ mode: {{ kube_proxy_mode }}
+ hostnameOverride: {{ inventory_hostname }}
+authorizationModes:
+{% for mode in authorization_modes %}
+- {{ mode }}
+{% endfor %}
+apiServerExtraArgs:
+ bind-address: {{ kube_apiserver_bind_address }}
+ insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
+ insecure-port: "{{ kube_apiserver_insecure_port }}"
+{% if kube_version | version_compare('v1.10', '<') %}
+ admission-control: {{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
+ apiserver-count: "{{ kube_apiserver_count }}"
+{% if kube_version | version_compare('v1.9', '>=') %}
+ endpoint-reconciler-type: lease
+{% endif %}
+{% if etcd_events_cluster_enabled %}
+ etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
+{% endif %}
+ service-node-port-range: {{ kube_apiserver_node_port_range }}
+ kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
+{% if kube_basic_auth|default(true) %}
+ basic-auth-file: {{ kube_users_dir }}/known_users.csv
+{% endif %}
+{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
+ oidc-issuer-url: {{ kube_oidc_url }}
+ oidc-client-id: {{ kube_oidc_client_id }}
+{% if kube_oidc_ca_file is defined %}
+ oidc-ca-file: {{ kube_oidc_ca_file }}
+{% endif %}
+{% if kube_oidc_username_claim is defined %}
+ oidc-username-claim: {{ kube_oidc_username_claim }}
+{% endif %}
+{% if kube_oidc_groups_claim is defined %}
+ oidc-groups-claim: {{ kube_oidc_groups_claim }}
+{% endif %}
+{% endif %}
+{% if kube_encrypt_secret_data %}
+ experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
+{% endif %}
+ storage-backend: {{ kube_apiserver_storage_backend }}
+{% if kube_api_runtime_config is defined %}
+ runtime-config: {{ kube_api_runtime_config | join(',') }}
+{% endif %}
+ allow-privileged: "true"
+{% for key in kube_kubeadm_apiserver_extra_args %}
+ {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
+{% endfor %}
+controllerManagerExtraArgs:
+ node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
+ node-monitor-period: {{ kube_controller_node_monitor_period }}
+ pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+{% if kubernetes_audit %}
+apiServerExtraVolumes:
+- name: {{ audit_policy_name }}
+ hostPath: {{ audit_policy_hostpath }}
+ mountPath: {{ audit_policy_mountpath }}
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+controllerManagerExtraVolumes:
+- name: openstackcacert
+ hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+ mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% for key in kube_kubeadm_controller_extra_args %}
+ {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
+{% endfor %}
+{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
+schedulerExtraArgs:
+{% for key in kube_kubeadm_scheduler_extra_args %}
+ {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
+{% endfor %}
+{% endif %}
+apiServerCertSANs:
+{% for san in apiserver_sans.split(' ') | unique %}
+ - {{ san }}
+{% endfor %}
+certificatesDir: {{ kube_config_dir }}/ssl
+unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+nodeRegistration:
+{% if kube_override_hostname|default('') %}
+ name: {{ kube_override_hostname }}
+{% endif %}
+ taints:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
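+# kubeadm only wires up the audit backend when the Auditing feature gate is
+# enabled, hence the featureGates entry below.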
+{% if kubernetes_audit %}
+featureGates:
+ Auditing: true
+{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index b638ff457..9cec5ded7 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -28,9 +28,16 @@ spec:
command:
- /hyperkube
- apiserver
+{% if kubernetes_audit %}
+ - --audit-log-path={{ audit_log_path }}
+ - --audit-log-maxage={{ audit_log_maxage }}
+ - --audit-log-maxbackup={{ audit_log_maxbackups }}
+ - --audit-log-maxsize={{ audit_log_maxsize }}
+ - --audit-policy-file={{ audit_policy_file }}
+{% endif %}
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={{ etcd_access_addresses }}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
- --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
{% endif %}
{% if kube_version | version_compare('v1.9', '<') %}
@@ -45,7 +52,16 @@ spec:
{% if kube_version | version_compare('v1.9', '>=') %}
- --endpoint-reconciler-type=lease
{% endif %}
+{% if kube_version | version_compare('v1.10', '<') %}
- --admission-control={{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ - --enable-admission-plugins={{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ - --disable-admission-plugins={{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
- --service-cluster-ip-range={{ kube_service_addresses }}
- --service-node-port-range={{ kube_apiserver_node_port_range }}
- --client-ca-file={{ kube_cert_dir }}/ca.pem
@@ -175,6 +191,14 @@ spec:
- mountPath: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
readOnly: true
+{% endif %}
+{% if kubernetes_audit %}
+ - mountPath: {{ audit_log_mountpath }}
+ name: {{ audit_log_name }}
+      readOnly: false
+ - mountPath: {{ audit_policy_mountpath }}
+ name: {{ audit_policy_name }}
+      readOnly: false
{% endif %}
volumes:
- hostPath:
@@ -196,3 +220,11 @@ spec:
path: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
{% endif %}
+{% if kubernetes_audit %}
+ - hostPath:
+ path: {{ audit_log_hostpath }}
+ name: {{ audit_log_name }}
+ - hostPath:
+ path: {{ audit_policy_hostpath }}
+ name: {{ audit_policy_name }}
+{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 012372496..85e6043e6 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -94,6 +94,11 @@ spec:
- mountPath: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
readOnly: true
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+ - mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+ name: openstackcacert
+ readOnly: true
{% endif %}
volumes:
- name: ssl-certs-host
@@ -115,3 +120,8 @@ spec:
path: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+ - hostPath:
+ path: "{{ kube_config_dir }}/openstack-cacert.pem"
+ name: openstackcacert
+{% endif %}
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 9a3a08e5b..2c541c112 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -14,6 +14,9 @@ kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"
+# bind address for kube-proxy health check
+kube_proxy_healthz_bind_address: "127.0.0.1"
+
# Can be ipvs, iptables
kube_proxy_mode: iptables
@@ -111,6 +114,7 @@ openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_
openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
+openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index 63a529ace..fe4b6c9c8 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -1,19 +1,4 @@
---
-- name: install | Set SSL CA directories
- set_fact:
- ssl_ca_dirs: "[
- {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
- '/usr/share/ca-certificates',
- {% elif ansible_os_family == 'RedHat' -%}
- '/etc/pki/tls',
- '/etc/pki/ca-trust',
- {% elif ansible_os_family == 'Debian' -%}
- '/usr/share/ca-certificates',
- {% endif -%}
- ]"
- tags:
- - facts
-
- name: Set kubelet deployment to host if kubeadm is enabled
set_fact:
kubelet_deployment_type: host
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index f7520caf8..7f807ceeb 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -110,13 +110,13 @@
modprobe:
name: "{{ item }}"
state: present
- when: kube_proxy_mode == 'ipvs'
with_items:
- ip_vs
- ip_vs_rr
- ip_vs_wrr
- ip_vs_sh
- nf_conntrack_ipv4
+ when: kube_proxy_mode == 'ipvs'
tags:
- kube-proxy
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index 4ca17ef53..e313161a0 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -75,9 +75,6 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% else %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
-{% if inventory_hostname in groups['kube-ingress']|default([]) %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
-{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.iteritems() %}
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index b53102539..d75f4c662 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -18,7 +18,6 @@ EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
{% if kubelet_load_modules == true %}
- --volume modprobe,kind=host,source=/usr/sbin/modprobe \
--volume lib-modules,kind=host,source=/lib/modules \
{% endif %}
--volume os-release,kind=host,source=/etc/os-release,readOnly=true \
@@ -51,7 +50,6 @@ ExecStart=/usr/bin/rkt run \
{% endif %}
{% endif %}
{% if kubelet_load_modules == true %}
- --mount volume=modprobe,target=/usr/sbin/modprobe \
--mount volume=lib-modules,target=/lib/modules \
{% endif %}
--mount volume=etc-cni,target=/etc/cni \
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index 83d657f7e..031a59fdb 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -40,6 +40,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
+{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+--cgroup-driver=systemd \
+{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
{# DNS settings for kubelet #}
@@ -91,9 +94,6 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% else %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
-{% if inventory_hostname in groups['kube-ingress']|default([]) %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
-{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.iteritems() %}
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 2209709b6..7096a2ff2 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -42,7 +42,7 @@ spec:
- --cluster-cidr={{ kube_pods_subnet }}
- --proxy-mode={{ kube_proxy_mode }}
- --oom-score-adj=-998
- - --healthz-bind-address=127.0.0.1
+ - --healthz-bind-address={{ kube_proxy_healthz_bind_address }}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
- --masquerade-all
{% elif kube_proxy_mode == 'ipvs' %}
diff --git a/roles/kubernetes/node/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2
index e4dd33559..b6814b51b 100644
--- a/roles/kubernetes/node/templates/openstack-cloud-config.j2
+++ b/roles/kubernetes/node/templates/openstack-cloud-config.j2
@@ -12,6 +12,9 @@ domain-name="{{ openstack_domain_name }}"
{% elif openstack_domain_id is defined and openstack_domain_id != "" %}
domain-id ="{{ openstack_domain_id }}"
{% endif %}
+{% if openstack_cacert is defined and openstack_cacert != "" %}
+ca-file="{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
{% if openstack_blockstorage_version is defined %}
[BlockStorage]
diff --git a/roles/kubernetes/node/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
index d82d72bf8..1383f78bb 100644
--- a/roles/kubernetes/node/templates/vsphere-cloud-config.j2
+++ b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
@@ -14,6 +14,9 @@ server = "{{ vsphere_vcenter_ip }}"
{% if vsphere_vm_uuid is defined and vsphere_vm_uuid != "" %}
vm-uuid = "{{ vsphere_vm_uuid }}"
{% endif %}
+{% if vsphere_vm_name is defined and vsphere_vm_name != "" %}
+vm-name = "{{ vsphere_vm_name }}"
+{% endif %}
{% endif %}
{% if kube_version | version_compare('v1.9.2', '>=') %}
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index e21587517..965dcbda0 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -14,6 +14,7 @@ common_required_pkgs:
- bash-completion
- socat
- unzip
+ - nss
# Set to true if your network does not support IPv6
# This may be necessary for pulling Docker images from
diff --git a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
index 2df6962e8..3e737fea3 100644
--- a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
@@ -12,6 +12,8 @@
failed_when: False
changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
register: growpart_needed
+ environment:
+ LC_ALL: C
- name: check fs type
command: file -Ls /dev/sda1
@@ -21,7 +23,9 @@
- name: run growpart
command: growpart /dev/sda 1
when: growpart_needed.changed
+ environment:
+ LC_ALL: C
- name: run xfs_growfs
command: xfs_growfs /dev/sda1
- when: growpart_needed.changed and 'XFS' in fs_type.stdout
\ No newline at end of file
+ when: growpart_needed.changed and 'XFS' in fs_type.stdout
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index fcbea6404..4db366ced 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# Disable swap
+- import_tasks: swapoff.yml
+ when: disable_swap
+
- import_tasks: verify-settings.yml
tags:
- asserts
@@ -307,3 +311,17 @@
- ansible_distribution in ["CentOS","RedHat"]
tags:
- bootstrap-os
+
+- name: Write cacert file
+ copy:
+ content: "{{ openstack_cacert }}"
+ dest: "{{ kube_config_dir }}/openstack-cacert.pem"
+ group: "{{ kube_cert_group }}"
+ mode: 0640
+ when:
+ - inventory_hostname in groups['k8s-cluster']
+ - cloud_provider is defined
+ - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+ - openstack_cacert is defined
+ tags:
+ - cloud-provider
diff --git a/roles/kubernetes/preinstall/tasks/swapoff.yml b/roles/kubernetes/preinstall/tasks/swapoff.yml
new file mode 100644
index 000000000..345e75825
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/swapoff.yml
@@ -0,0 +1,10 @@
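+# kubelet refuses to start while swap is active (see kubelet_fail_swap_on),
+# so remove swap from fstab and switch it off on the running system.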
+---
+- name: Remove swapfile from /etc/fstab
+ mount:
+ name: swap
+ fstype: swap
+ state: absent
+
+- name: Disable swap
+ command: swapoff -a
+ when: ansible_swaptotal_mb > 0
diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml
index 0f7c8bdc3..581acdc8f 100644
--- a/roles/kubernetes/preinstall/tasks/verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml
@@ -17,13 +17,13 @@
- name: Stop if unknown network plugin
assert:
- that: network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud']
- when: network_plugin is defined
+ that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'contiv']
+ when: kube_network_plugin is defined
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if incompatible network plugin and cloudprovider
assert:
- that: network_plugin != 'calico'
+ that: kube_network_plugin != 'calico'
msg: "Azure and Calico are not compatible. See https://github.com/projectcalico/calicoctl/issues/949 for details."
when: cloud_provider is defined and cloud_provider == 'azure'
ignore_errors: "{{ ignore_assert_errors }}"
@@ -68,7 +68,7 @@
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
assert:
- that: "{{ kubelet_max_pods <= ((32 - kube_network_node_prefix) ** 2) - 2 }}"
+ that: "{{ kubelet_max_pods <= (2 ** (32 - kube_network_node_prefix)) - 2 }}"
msg: "Do not schedule more pods on a node than inet addresses are available."
ignore_errors: "{{ ignore_assert_errors }}"
when:
@@ -86,12 +86,6 @@
when: access_ip is defined
ignore_errors: "{{ ignore_assert_errors }}"
-- name: Stop if swap enabled
- assert:
- that: ansible_swaptotal_mb == 0
- when: kubelet_fail_swap_on|default(true)
- ignore_errors: "{{ ignore_assert_errors }}"
-
- name: Stop if RBAC is not enabled when dashboard is enabled
assert:
that: rbac_enabled
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index e8d3e9e94..63b7e7db2 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -33,14 +33,14 @@
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
- '{{ kube_cert_dir }}/admin-{{ host }}.pem'
+ '{{ kube_cert_dir }}/admin-{{ host }}.pem',
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
- {% endfor %}]
+ {% endfor %},
{% for host in groups['k8s-cluster'] %}
- '{{ kube_cert_dir }}/node-{{ host }}.pem'
- '{{ kube_cert_dir }}/node-{{ host }}-key.pem'
- '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem'
+ '{{ kube_cert_dir }}/node-{{ host }}.pem',
+ '{{ kube_cert_dir }}/node-{{ host }}-key.pem',
+ '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
index db93f64e5..8a847b002 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
@@ -1,22 +1,23 @@
---
-#- import_tasks: sync_kube_master_certs.yml
-# when: inventory_hostname in groups['kube-master']
-#
-#- import_tasks: sync_kube_node_certs.yml
-# when: inventory_hostname in groups['k8s-cluster']
+- import_tasks: sync_kube_master_certs.yml
+ when: inventory_hostname in groups['kube-master']
+
+- import_tasks: sync_kube_node_certs.yml
+ when: inventory_hostname in groups['k8s-cluster']
# Issue admin certs to kube-master hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "admin"
- issue_cert_copy_ca: true
+ issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
- issue_cert_path: "{{ inventory_hostname }}"
+ issue_cert_path: "{{ item }}"
issue_cert_role: kube-master
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+ with_items: "{{ kube_admin_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
- name: gen_certs_vault | Set fact about certificate alt names
@@ -24,7 +25,7 @@
kube_cert_alt_names: >-
{{
groups['kube-master'] +
- ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
+ ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
['localhost']
}}
run_once: true
@@ -43,6 +44,7 @@
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_run_once: true
issue_cert_ip_sans: >-
[
{%- for host in groups['kube-master'] -%}
@@ -58,10 +60,11 @@
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
- issue_cert_path: "{{ inventory_hostname }}"
+ issue_cert_path: "{{ item }}"
issue_cert_role: kube-master
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+ with_items: "{{ kube_master_components_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
notify: set secret_changed
@@ -71,33 +74,37 @@
# Need to strip out the 'node-' prefix from the cert name so it can be used
# with the node authorization plugin ( CN matches kubelet node name )
issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}"
- issue_cert_copy_ca: yes
+ issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
- issue_cert_path: "{{ inventory_hostname }}"
+ issue_cert_path: "{{ item }}"
issue_cert_role: kube-node
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+ with_items: "{{ kube_node_certs_needed|d([]) }}"
+ when: inventory_hostname in groups['k8s-cluster']
# Issue proxy certs to k8s-cluster nodes
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "system:kube-proxy"
- issue_cert_copy_ca: true
+ issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
- issue_cert_path: "{{ inventory_hostname }}"
+ issue_cert_path: "{{ item }}"
issue_cert_role: kube-proxy
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+ with_items: "{{ kube_proxy_certs_needed|d([]) }}"
+ when: inventory_hostname in groups['k8s-cluster']
# Issue front proxy cert to kube-master hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "front-proxy-client"
- issue_cert_copy_ca: true
+ issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
issue_cert_ca_filename: front-proxy-ca.pem
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}"
@@ -118,9 +125,10 @@
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
- issue_cert_path: "{{ inventory_hostname }}"
+ issue_cert_path: "{{ item }}"
issue_cert_role: front-proxy-client
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+ with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
notify: set secret_changed
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index 52fedae5b..d36c3a057 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -2,11 +2,13 @@
- import_tasks: check-certs.yml
tags:
- k8s-secrets
+ - k8s-gen-certs
- facts
- import_tasks: check-tokens.yml
tags:
- k8s-secrets
+ - k8s-gen-tokens
- facts
- name: Make sure the certificate directory exists
@@ -70,10 +72,12 @@
- include_tasks: "gen_certs_{{ cert_management }}.yml"
tags:
- k8s-secrets
+ - k8s-gen-certs
- import_tasks: upd_ca_trust.yml
tags:
- k8s-secrets
+ - k8s-gen-certs
- name: "Gen_certs | Get certificate serials on kube masters"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
@@ -85,6 +89,10 @@
- "kube-controller-manager.pem"
- "kube-scheduler.pem"
when: inventory_hostname in groups['kube-master']
+ tags:
+ - master
+ - kubelet
+ - node
- name: "Gen_certs | set kube master certificate serial facts"
set_fact:
@@ -93,6 +101,10 @@
controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}"
scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}"
when: inventory_hostname in groups['kube-master']
+ tags:
+ - master
+ - kubelet
+ - node
- name: "Gen_certs | Get certificate serials on kube nodes"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
@@ -108,7 +120,11 @@
kubelet_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}"
when: inventory_hostname in groups['k8s-cluster']
+ tags:
+ - kubelet
+ - node
- import_tasks: gen_tokens.yml
tags:
- k8s-secrets
+ - k8s-gen-tokens
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index adeb84dc6..9f7cf5e11 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -12,9 +12,11 @@ kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
+# Optionally disable swap
+disable_swap: true
## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.10.2
+kube_version: v1.11.2
## Kube Proxy mode One of ['iptables','ipvs']
kube_proxy_mode: iptables
@@ -144,7 +146,13 @@ docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_options: >
+ --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
+ {% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+ --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+ --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+ --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+ {% endif %}
## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
@@ -178,7 +186,6 @@ dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
-istio_enabled: false
registry_enabled: false
enable_network_policy: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
@@ -210,7 +217,7 @@ authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
-kubelet_authentication_token_webhook: false
+kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
@@ -218,7 +225,6 @@ kubelet_authorization_mode_webhook: false
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates:
- - "Initializers={{ istio_enabled | string }}"
- "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
- "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
- "MountPropagation={{ local_volume_provisioner_enabled | string }}"
@@ -277,6 +283,18 @@ proxy_env:
https_proxy: "{{ https_proxy| default ('') }}"
no_proxy: "{{ no_proxy| default ('') }}"
+ssl_ca_dirs: >-
+ [
+ {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
+ '/usr/share/ca-certificates',
+ {% elif ansible_os_family == 'RedHat' -%}
+ '/etc/pki/tls',
+ '/etc/pki/ca-trust',
+ {% elif ansible_os_family == 'Debian' -%}
+ '/usr/share/ca-certificates',
+ {% endif -%}
+ ]
+
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
@@ -314,12 +332,13 @@ kube_apiserver_client_key: |-
{%- endif %}
# Set to true to deploy etcd-events cluster
-etcd_events_cluster_setup: false
+etcd_events_cluster_enabled: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_access_address }}:2382"
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 857ebd11a..553eb6753 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -51,3 +51,5 @@ rbac_resources:
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"
+
+calico_baremetal_nodename: "{{ inventory_hostname }}"
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 05e7b9611..18fe597c7 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -49,7 +49,7 @@
changed_when: false
- name: Calico | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
@@ -113,7 +113,7 @@
"apiVersion": "v1",
"metadata": {"cidr": "{{ kube_pods_subnet }}"}
}'
- | {{ bin_dir }}/calicoctl create -f -
+ | {{ bin_dir }}/calicoctl apply -f -
environment:
NO_DEFAULT_POOLS: true
run_once: true
diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
index 6dd51e912..443e3b43b 100644
--- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2
+++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
@@ -6,7 +6,7 @@
{% if cloud_provider is defined %}
"nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
- "nodename": "{{ inventory_hostname }}",
+ "nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
"type": "calico",
"etcd_endpoints": "{{ etcd_access_addresses }}",
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index a42c2cfa7..5b1b6a9e0 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -56,7 +56,7 @@
- rbac_enabled or item.type not in rbac_resources
- name: Canal | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index 389fe5bd6..dea905b3b 100755
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -12,9 +12,9 @@ cilium_policy_dir: /etc/kubernetes/policy
# Limits for apps
cilium_memory_limit: 500M
-cilium_cpu_limit: 200m
+cilium_cpu_limit: 500m
cilium_memory_requests: 64M
-cilium_cpu_requests: 50m
+cilium_cpu_requests: 100m
# Optional features
cilium_enable_prometheus: false
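
The raised CPU figures remain ordinary role defaults, so deployments sized against the old values can pin them back in inventory; a sketch (values are the previous defaults):

    # group_vars/k8s-cluster.yml (illustrative)
    cilium_cpu_limit: 200m
    cilium_cpu_requests: 50m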
diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
index c5051e2ca..cf5758465 100755
--- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
@@ -1,29 +1,49 @@
-kind: ConfigMap
+---
apiVersion: v1
+kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# This etcd-config contains the etcd endpoints of your cluster. If you use
- # TLS please make sure you uncomment the ca-file line and add the respective
- # certificate has a k8s secret, see explanation bellow in the comment labeled
- # "ETCD-CERT"
+  # TLS, please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
- endpoints:
+ endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
- - {{ ip_addr }}
+ - {{ ip_addr }}
{% endfor %}
- #
- # In case you want to use TLS in etcd, uncomment the following line
- # and add the certificate as explained in the comment labeled "ETCD-CERT"
+
+ # In case you want to use TLS in etcd, uncomment the 'ca-file' line
+ # and create a kubernetes secret by following the tutorial in
+ # https://cilium.link/etcd-config
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
- #
+
# In case you want client to server authentication, uncomment the following
- # lines and add the certificate and key in cilium-etcd-secrets bellow
+ # lines and create a kubernetes secret by following the tutorial in
+ # https://cilium.link/etcd-config
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"
# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
disable-ipv4: "{{ cilium_disable_ipv4 }}"
+  # If you want to clean cilium state, change this value to true
+ clean-cilium-state: "false"
+ legacy-host-allows-world: "false"
+
+ # If you want cilium monitor to aggregate tracing for packets, set this level
+  # to "low", "medium", or "maximum". The higher the level, the fewer packets
+  # will be seen in monitor output.
+ monitor-aggregation-level: "none"
+
+ # Regular expression matching compatible Istio sidecar istio-proxy
+ # container image names
+ sidecar-istio-proxy-image: "cilium/istio_proxy"
+
+ # Encapsulation mode for communication between nodes
+ # Possible values:
+ # - disabled
+ # - vxlan (default)
+ # - geneve
+ tunnel: "vxlan"
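
For a two-member etcd cluster, the `etcd-config` key above would render roughly as follows (the endpoints and `cilium_cert_dir` value of `/etc/cilium/certs` are assumed):

    etcd-config: |-
      ---
      endpoints:
        - https://10.0.0.11:2379
        - https://10.0.0.12:2379
      ca-file: "/etc/cilium/certs/ca_cert.crt"
      key-file: "/etc/cilium/certs/key.pem"
      cert-file: "/etc/cilium/certs/cert.crt"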
diff --git a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
index 11fd01087..2e5efff86 100755
--- a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
@@ -1,64 +1,66 @@
---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cilium
rules:
-- apiGroups:
- - "networking.k8s.io"
- resources:
- - networkpolicies
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - namespaces
- - services
- - nodes
- - endpoints
- - componentstatuses
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- verbs:
- - get
- - list
- - watch
- - update
-- apiGroups:
- - extensions
- resources:
- - networkpolicies #FIXME remove this when we drop support for k8s NP-beta GH-1202
- - thirdpartyresources
- - ingresses
- verbs:
- - create
- - get
- - list
- - watch
-- apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - get
- - list
- - watch
- - update
-- apiGroups:
- - cilium.io
- resources:
- - ciliumnetworkpolicies
- - ciliumendpoints
- verbs:
- - "*"
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - services
+ - nodes
+ - endpoints
+ - componentstatuses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies # FIXME remove this when we drop support for k8s NP-beta GH-1202
+ - thirdpartyresources
+ - ingresses
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "apiextensions.k8s.io"
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - cilium.io
+ resources:
+ - ciliumnetworkpolicies
+ - ciliumnetworkpolicies/status
+ - ciliumendpoints
+ - ciliumendpoints/status
+ verbs:
+ - "*"
diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
index 04d603d57..35994bc68 100755
--- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
@@ -1,6 +1,6 @@
---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cilium
roleRef:
@@ -8,8 +8,8 @@ roleRef:
kind: ClusterRole
name: cilium
subjects:
-- kind: ServiceAccount
- name: cilium
- namespace: kube-system
-- kind: Group
- name: system:nodes
+ - kind: ServiceAccount
+ name: cilium
+ namespace: kube-system
+ - kind: Group
+ name: system:nodes
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index 8eaa24f32..1ec322916 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -1,10 +1,21 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
spec:
+ updateStrategy:
+ type: "RollingUpdate"
+ rollingUpdate:
+ # Specifies the maximum number of Pods that can be unavailable during the update process.
+      # The current default is 1 or 100% for daemonsets; an explicit value is set
+      # here to avoid confusion, as the default differs by type (daemonset/deployment).
+ maxUnavailable: "100%"
+ selector:
+ matchLabels:
+ k8s-app: cilium
+ kubernetes.io/cluster-service: "true"
template:
metadata:
labels:
@@ -26,145 +37,185 @@ spec:
{% if rbac_enabled %}
serviceAccountName: cilium
{% endif %}
+ initContainers:
+ - name: clean-cilium-state
+ image: docker.io/library/busybox:1.28.4
+ imagePullPolicy: IfNotPresent
+ command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
+ volumeMounts:
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
+ - name: cilium-run
+ mountPath: /var/run/cilium
+ env:
+ - name: "CLEAN_CILIUM_STATE"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ optional: true
+ key: clean-cilium-state
containers:
- - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
- imagePullPolicy: Always
- name: cilium-agent
- command: [ "cilium-agent" ]
- args:
- - "--debug=$(CILIUM_DEBUG)"
- - "-t"
- - "vxlan"
- - "--kvstore"
- - "etcd"
- - "--kvstore-opt"
- - "etcd.config=/var/lib/etcd-config/etcd.config"
- - "--disable-ipv4=$(DISABLE_IPV4)"
+ - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
+ imagePullPolicy: Always
+ name: cilium-agent
+ command: ["cilium-agent"]
+ args:
+ - "--debug=$(CILIUM_DEBUG)"
+ - "--kvstore=etcd"
+ - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
+ - "--disable-ipv4=$(DISABLE_IPV4)"
{% if cilium_enable_prometheus %}
- ports:
- - name: prometheus
- containerPort: 9090
+ ports:
+ - name: prometheus
+ containerPort: 9090
{% endif %}
- lifecycle:
- postStart:
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/cni-install.sh"
+ preStop:
+ exec:
+ command:
+ - "/cni-uninstall.sh"
+ env:
+ - name: "K8S_NODE_NAME"
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: "CILIUM_DEBUG"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: debug
+ - name: "DISABLE_IPV4"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: disable-ipv4
+{% if cilium_enable_prometheus %}
+ # Note: this variable is a no-op if not defined, and is used in the
+ # prometheus examples.
+ - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-metrics-config
+ optional: true
+ key: prometheus-serve-addr
+{% endif %}
+ - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ optional: true
+ key: legacy-host-allows-world
+ - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: sidecar-istio-proxy-image
+ optional: true
+ - name: "CILIUM_TUNNEL"
+ valueFrom:
+ configMapKeyRef:
+ key: tunnel
+ name: cilium-config
+ optional: true
+ - name: "CILIUM_MONITOR_AGGREGATION_LEVEL"
+ valueFrom:
+ configMapKeyRef:
+ key: monitor-aggregation-level
+ name: cilium-config
+ optional: true
+ resources:
+ limits:
+ cpu: {{ cilium_cpu_limit }}
+ memory: {{ cilium_memory_limit }}
+ requests:
+ cpu: {{ cilium_cpu_requests }}
+ memory: {{ cilium_memory_requests }}
+ livenessProbe:
exec:
command:
- - "/cni-install.sh"
- preStop:
+ - cilium
+ - status
+ # The initial delay for the liveness probe is intentionally large to
+            # avoid an endless kill & restart cycle in the event that the initial
+ # bootstrapping takes longer than expected.
+ initialDelaySeconds: 120
+ failureThreshold: 10
+ periodSeconds: 10
+ readinessProbe:
exec:
command:
- - "/cni-uninstall.sh"
- env:
- - name: "K8S_NODE_NAME"
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: "CILIUM_DEBUG"
- valueFrom:
- configMapKeyRef:
- name: cilium-config
- key: debug
- - name: "DISABLE_IPV4"
- valueFrom:
- configMapKeyRef:
- name: cilium-config
- key: disable-ipv4
-{% if cilium_enable_prometheus %}
- # Note: this variable is a no-op if not defined, and is used in the
- # prometheus examples.
- - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
- valueFrom:
- configMapKeyRef:
- name: cilium-metrics-config
- optional: true
- key: prometheus-serve-addr
-{% endif %}
- resources:
- limits:
- cpu: {{ cilium_cpu_limit }}
- memory: {{ cilium_memory_limit }}
- requests:
- cpu: {{ cilium_cpu_requests }}
- memory: {{ cilium_memory_requests }}
- livenessProbe:
- exec:
- command:
- - cilium
- - status
- # The initial delay for the liveness probe is intentionally large to
- # avoid an endless kill & restart cycle if in the event that the initial
- # bootstrapping takes longer than expected.
- initialDelaySeconds: 120
- failureThreshold: 10
- periodSeconds: 10
- readinessProbe:
- exec:
- command:
- - cilium
- - status
- initialDelaySeconds: 5
- periodSeconds: 5
- volumeMounts:
- - name: bpf-maps
- mountPath: /sys/fs/bpf
- - name: cilium-run
- mountPath: /var/run/cilium
- - name: cni-path
- mountPath: /host/opt/cni/bin
- - name: etc-cni-netd
- mountPath: /host/etc/cni/net.d
- - name: docker-socket
- mountPath: /var/run/docker.sock
- readOnly: true
- - name: etcd-config-path
- mountPath: /var/lib/etcd-config
- readOnly: true
- - name: cilium-certs
- mountPath: {{ cilium_cert_dir }}
- readOnly: true
- securityContext:
- capabilities:
- add:
- - "NET_ADMIN"
- privileged: true
+ - cilium
+ - status
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ volumeMounts:
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
+ - name: cilium-run
+ mountPath: /var/run/cilium
+ - name: cni-path
+ mountPath: /host/opt/cni/bin
+ - name: etc-cni-netd
+ mountPath: /host/etc/cni/net.d
+ - name: docker-socket
+ mountPath: /var/run/docker.sock
+ readOnly: true
+ - name: etcd-config-path
+ mountPath: /var/lib/etcd-config
+ readOnly: true
+ - name: cilium-certs
+ mountPath: {{ cilium_cert_dir }}
+ readOnly: true
+ securityContext:
+ capabilities:
+ add:
+ - "NET_ADMIN"
+ privileged: true
hostNetwork: true
volumes:
- # To keep state between restarts / upgrades
+ # To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
- # To keep state between restarts / upgrades
+ # To keep state between restarts / upgrades
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
- # To read docker events from the node
+ # To read docker events from the node
- name: docker-socket
hostPath:
path: /var/run/docker.sock
- # To install cilium cni plugin in the host
+ # To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
- # To install cilium cni configuration in the host
+ # To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
- path: /etc/cni/net.d
- - name: cilium-certs
- hostPath:
- path: {{ cilium_cert_dir }}
- # To read the etcd config stored in config maps
+ path: /etc/cni/net.d
+ # To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
items:
- - key: etcd-config
- path: etcd.config
+ - key: etcd-config
+ path: etcd.config
+ # To read the k8s etcd secrets in case the user might want to use TLS
+ - name: cilium-certs
+ hostPath:
+ path: {{ cilium_cert_dir }}
+
+ restartPolicy: Always
tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- - effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
- value: "true"
- # Mark cilium's pod as critical for rescheduling
- - key: CriticalAddonsOnly
- operator: "Exists"
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ # Mark cilium's pod as critical for rescheduling
+ - key: CriticalAddonsOnly
+ operator: "Exists"
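
The new `clean-cilium-state` init container only acts when the optional ConfigMap key of the same name reads `"true"`; a sketch of toggling it for a one-off state wipe (set it back to `"false"` afterwards):

    # patched cilium-config (sketch); when this key reads "true", the init
    # container removes /var/run/cilium/state and the BPF maps under
    # /sys/fs/bpf/tc/globals on the next pod restart
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: cilium-config
      namespace: kube-system
    data:
      clean-cilium-state: "true"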
diff --git a/roles/network_plugin/cloud/tasks/main.yml b/roles/network_plugin/cloud/tasks/main.yml
index 7b6650372..59750770b 100644
--- a/roles/network_plugin/cloud/tasks/main.yml
+++ b/roles/network_plugin/cloud/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Cloud | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index d9b372480..bc9dcd3c0 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -97,7 +97,7 @@
and contiv_enable_api_proxy and contiv_generate_certificate"
- name: Contiv | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -a /opt/cni/bin/* /cnibindir/'"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml
index ab955ebef..ee636e56f 100644
--- a/roles/network_plugin/weave/defaults/main.yml
+++ b/roles/network_plugin/weave/defaults/main.yml
@@ -1,29 +1,58 @@
---
-# Limits
-weave_memory_limits: 400M
-weave_cpu_limits: 300m
-weave_memory_requests: 64M
-weave_cpu_requests: 10m
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: unset
-# weave_peers: unset
-weave_seed: uninitialized
-weave_peers: uninitialized
+# Weave's network password for encryption, if null then no network encryption.
+weave_password: ~
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
+# If set to 1, disable checking for new Weave Net versions (default is blank,
+# i.e. check is enabled)
+weave_checkpoint_disable: false
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
+# Soft limit on the number of connections between peers. Defaults to 100.
+weave_conn_limit: 100
-# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
+# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
+# for containers attached. If you need to disable hairpin, e.g. your kernel is
+# one of those that can panic if hairpin is enabled, then you can disable it by
+# setting `HAIRPIN_MODE=false`.
+weave_hairpin_mode: true
+
+# The range of IP addresses used by Weave Net and the subnet they are placed in
+# (CIDR format; default 10.32.0.0/12)
+weave_ipalloc_range: "{{ kube_pods_subnet }}"
+
+# Set to 0 to disable Network Policy Controller (default is on)
+weave_expect_npc: "{{ enable_network_policy }}"
+
+# List of addresses of peers in the Kubernetes cluster (default is to fetch the
+# list from the api-server)
+weave_kube_peers: ~
+
+# Set the initialization mode of the IP Address Manager (defaults to consensus
+# amongst the KUBE_PEERS)
+weave_ipalloc_init: ~
+
+# Set the IP address used as a gateway from the Weave network to the host
+# network - this is useful if you are configuring the addon as a static pod.
+weave_expose_ip: ~
+
+# Address and port that the Weave Net daemon will serve Prometheus-style
+# metrics on (defaults to 0.0.0.0:6782)
+weave_metrics_addr: ~
+
+# Address and port that the Weave Net daemon will serve status requests on
+# (defaults to disabled)
+weave_status_addr: ~
+
+# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
+# underlying network has a tighter limit, or set a larger size for better
+# performance if your network supports jumbo frames (e.g. 8916)
weave_mtu: 1376
-# this variable is use in seed mode
-weave_ip_current_cluster: "{% for host in groups['k8s-cluster'] %}{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{% if not loop.last %} {% endif %}{% endfor %}"
+# Set to 1 to preserve the client source IP address when accessing Services
+# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
+# only with Weave IPAM (default).
+weave_no_masq_local: true
+
+# Extra variables passed to launch.sh, useful for enabling seed mode; see
+# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
+weave_extra_args: ~
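
All of the new knobs map onto environment variables consumed by Weave's launch.sh; a hypothetical group_vars override enabling encryption and jumbo frames:

    # group_vars/k8s-cluster.yml (illustrative values)
    weave_password: "changeme"      # any non-null value turns on encryption
    weave_mtu: 8916                 # jumbo frames, per the comment above
    weave_checkpoint_disable: true  # skip the version check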
diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index c2c5d82c0..318b6a369 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -1,15 +1,7 @@
---
-- import_tasks: seed.yml
- when: weave_mode_seed
-
-- name: template weavenet conflist
- template:
- src: 00-weave.conflist.j2
- dest: /etc/cni/net.d/00-weave.conflist
- owner: kube
- name: Weave | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
@@ -19,9 +11,12 @@
- hyperkube
- upgrade
-- name: Weave | Create weave-net manifest
+- name: Weave | Create manifest
template:
src: weave-net.yml.j2
dest: "{{ kube_config_dir }}/weave-net.yml"
- mode: 0640
- register: weave_manifest
+
+- name: Weave | Fix nodePort for Weave
+ template:
+ src: 00-weave.conflist.j2
+ dest: /etc/cni/net.d/00-weave.conflist
diff --git a/roles/network_plugin/weave/tasks/seed.yml b/roles/network_plugin/weave/tasks/seed.yml
deleted file mode 100644
index 2765267e5..000000000
--- a/roles/network_plugin/weave/tasks/seed.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-- name: Weave seed | Set seed if first time
- set_fact:
- seed: '{% for host in groups["k8s-cluster"] %}{{ hostvars[host]["ansible_default_ipv4"]["macaddress"] }}{% if not loop.last %},{% endif %}{% endfor %}'
- when: "weave_seed == 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set seed if not first time
- set_fact:
- seed: '{{ weave_seed }}'
- when: "weave_seed != 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set peers if fist time
- set_fact:
- peers: '{{ weave_ip_current_cluster }}'
- when: "weave_peers == 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set peers if existing peers
- set_fact:
- peers: '{{ weave_peers }}{% for ip in weave_ip_current_cluster.split(" ") %}{% if ip not in weave_peers.split(" ") %} {{ ip }}{% endif %}{% endfor %}'
- when: "weave_peers != 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Save seed
- lineinfile:
- dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
- state: present
- regexp: '^weave_seed:'
- line: 'weave_seed: {{ seed }}'
- become: no
- delegate_to: 127.0.0.1
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Save peers
- lineinfile:
- dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
- state: present
- regexp: '^weave_peers:'
- line: 'weave_peers: {{ peers }}'
- become: no
- delegate_to: 127.0.0.1
- run_once: true
- tags:
- - confweave
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index 9a7da7377..1995b6677 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -15,7 +15,6 @@ items:
name: weave-net
labels:
name: weave-net
- namespace: kube-system
rules:
- apiGroups:
- ''
@@ -35,13 +34,19 @@ items:
- get
- list
- watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - update
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
- namespace: kube-system
roleRef:
kind: ClusterRole
name: weave-net
@@ -94,7 +99,6 @@ items:
name: weave-net
labels:
name: weave-net
- version: v{{ weave_version }}
namespace: kube-system
spec:
minReadySeconds: 5
@@ -106,31 +110,56 @@ items:
containers:
- name: weave
command:
-{% if weave_mode_seed == true %}
- - /bin/sh
- - -c
- - export EXTRA_ARGS=--name=$(cat /sys/class/net/{{ ansible_default_ipv4['interface'] }}/address) && /home/weave/launch.sh
-{% else %}
- /home/weave/launch.sh
-{% endif %}
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- - name: WEAVE_MTU
- value: "{{ weave_mtu }}"
- - name: IPALLOC_RANGE
- value: {{ kube_pods_subnet }}
-{% if weave_mode_seed == true %}
- - name: KUBE_PEERS
- value: {{ peers }}
- - name: IPALLOC_INIT
- value: seed={{ seed }}
-{% endif %}
- name: WEAVE_PASSWORD
- value: {{ weave_password }}
+ valueFrom:
+ secretKeyRef:
+ name: weave-net
+ key: WEAVE_PASSWORD
+ - name: CHECKPOINT_DISABLE
+ value: "{{ weave_checkpoint_disable | bool | int }}"
+ - name: CONN_LIMIT
+ value: "{{ weave_conn_limit | int }}"
+ - name: HAIRPIN_MODE
+ value: "{{ weave_hairpin_mode | bool }}"
+ - name: IPALLOC_RANGE
+ value: "{{ weave_ipalloc_range }}"
+ - name: EXPECT_NPC
+ value: "{{ weave_expect_npc | bool | int }}"
+{% if weave_kube_peers %}
+ - name: KUBE_PEERS
+ value: "{{ weave_kube_peers }}"
+{% endif %}
+{% if weave_ipalloc_init %}
+ - name: IPALLOC_INIT
+ value: "{{ weave_ipalloc_init }}"
+{% endif %}
+{% if weave_expose_ip %}
+ - name: WEAVE_EXPOSE_IP
+ value: "{{ weave_expose_ip }}"
+{% endif %}
+{% if weave_metrics_addr %}
+ - name: WEAVE_METRICS_ADDR
+ value: "{{ weave_metrics_addr }}"
+{% endif %}
+{% if weave_status_addr %}
+ - name: WEAVE_STATUS_ADDR
+ value: "{{ weave_status_addr }}"
+{% endif %}
+ - name: WEAVE_MTU
+ value: "{{ weave_mtu | int }}"
+ - name: NO_MASQ_LOCAL
+ value: "{{ weave_no_masq_local | bool | int }}"
+{% if weave_extra_args %}
+ - name: EXTRA_ARGS
+ value: "{{ weave_extra_args }}"
+{% endif %}
image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
livenessProbe:
@@ -141,11 +170,7 @@ items:
initialDelaySeconds: 30
resources:
requests:
- cpu: {{ weave_cpu_requests }}
- memory: {{ weave_memory_requests }}
- limits:
- cpu: {{ weave_cpu_limits }}
- memory: {{ weave_memory_limits }}
+ cpu: 10m
securityContext:
privileged: true
volumeMounts:
@@ -175,11 +200,7 @@ items:
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
requests:
- cpu: {{ weave_cpu_requests }}
- memory: {{ weave_memory_requests }}
- limits:
- cpu: {{ weave_cpu_limits }}
- memory: {{ weave_memory_limits }}
+ cpu: 10m
securityContext:
privileged: true
volumeMounts:
@@ -216,7 +237,15 @@ items:
- name: xtables-lock
hostPath:
path: /run/xtables.lock
+ type: FileOrCreate
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: weave-net
+ namespace: kube-system
+ data:
+ WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}"
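
With the defaults above (and an assumed `weave_password` of `changeme`), the relevant rendered pieces look roughly like this; note the `| bool | int` filters, which turn booleans into the 0/1 values the weave images expect:

    # rendered env on the weave container (sketch)
    - name: CHECKPOINT_DISABLE
      value: "0"
    - name: EXPECT_NPC
      value: "1"          # assuming enable_network_policy is true
    - name: NO_MASQ_LOCAL
      value: "1"
    # rendered Secret data
    WEAVE_PASSWORD: "Y2hhbmdlbWU="   # b64encode("changeme")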
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 395f9986b..b820bff09 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,8 +1,9 @@
---
- name: Delete node
- command: kubectl delete node {{ item }}
+ command: "{{ bin_dir}}/kubectl delete node {{ item }}"
with_items:
- - "{{ groups['kube-node'] }}"
- delegate_to: "{{ groups['kube-master'][0] }}"
+ - "{{ node.split(',') | default(groups['kube-node']) }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 12091917a..5db5fa13a 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -9,7 +9,8 @@
--timeout {{ drain_timeout }}
--delete-local-data {{ item }}
with_items:
- - "{{ groups['kube-node'] }}"
+ - "{{ node.split(',') | default(groups['kube-node']) }}"
failed_when: false
- delegate_to: "{{ groups['kube-master'][0] }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
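
Both tasks now honor an optional comma-separated `node` variable, so a removal can be scoped without editing the inventory (`node` falls back to the whole `kube-node` group when unset). A hypothetical extra-vars file:

    # remove.yml, passed as: ansible-playbook remove-node.yml -e @remove.yml
    node: "node3,node4"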
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 9ae683df3..f190dbee8 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -61,7 +61,7 @@
- docker
- name: reset | gather mounted kubelet dirs
- shell: mount | grep /var/lib/kubelet | awk '{print $3}' | tac
+ shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
check_mode: no
register: mounted_dirs
tags:
diff --git a/roles/rkt/files/rkt-gc.sh b/roles/rkt/files/rkt-gc.sh
new file mode 100644
index 000000000..e260668cf
--- /dev/null
+++ b/roles/rkt/files/rkt-gc.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+rkt gc
diff --git a/roles/rkt/tasks/main.yml b/roles/rkt/tasks/main.yml
index ab9571b13..00f9e79c4 100644
--- a/roles/rkt/tasks/main.yml
+++ b/roles/rkt/tasks/main.yml
@@ -1,4 +1,13 @@
---
-
- name: Install rkt
import_tasks: install.yml
+ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+
+- name: Set up cron job to do garbage cleanup
+ copy:
+ src: rkt-gc.sh
+ dest: /etc/cron.hourly/rkt-gc.sh
+ owner: root
+ group: root
+ mode: 0750
+ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
diff --git a/roles/vault/handlers/main.yml b/roles/vault/handlers/main.yml
index 55d6d592f..3aeb75041 100644
--- a/roles/vault/handlers/main.yml
+++ b/roles/vault/handlers/main.yml
@@ -3,7 +3,7 @@
command: /bin/true
notify:
- restart vault service
- - set facts about local Vault health
+ - wait for vault up
- unseal vault
- name: wait for vault up
diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml
index 18373ad9a..e4e67d11f 100644
--- a/roles/vault/tasks/bootstrap/main.yml
+++ b/roles/vault/tasks/bootstrap/main.yml
@@ -43,7 +43,7 @@
- "{{ vault_pki_mounts.etcd }}"
loop_control:
loop_var: mount
- when: inventory_hostname in groups.vault and not vault_cluster_is_initialized
+ when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
- include_tasks: ../shared/gen_ca.yml
vars:
diff --git a/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/roles/vault/tasks/bootstrap/sync_vault_certs.yml
index d6b2c6e91..cf499099a 100644
--- a/roles/vault/tasks/bootstrap/sync_vault_certs.yml
+++ b/roles/vault/tasks/bootstrap/sync_vault_certs.yml
@@ -4,6 +4,8 @@
sync_file: "ca.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups.vault }}"
+ sync_file_owner: vault
+ sync_file_group: root
sync_file_is_cert: true
- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
@@ -20,6 +22,8 @@
sync_file: "ca.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_owner: vault
+ sync_file_group: root
sync_file_is_cert: false
- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
@@ -36,6 +40,8 @@
sync_file: "api.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups.vault }}"
+ sync_file_owner: vault
+ sync_file_group: root
sync_file_is_cert: true
- name: bootstrap/sync_vault_certs | Set fact if Vault's API cert is needed
diff --git a/roles/vault/tasks/cluster/init.yml b/roles/vault/tasks/cluster/init.yml
index 30f64f3b1..fea670df2 100644
--- a/roles/vault/tasks/cluster/init.yml
+++ b/roles/vault/tasks/cluster/init.yml
@@ -1,5 +1,4 @@
---
-
- name: cluster/init | wait for vault
command: /bin/true
notify: wait for vault up
diff --git a/roles/vault/tasks/shared/check_etcd.yml b/roles/vault/tasks/shared/check_etcd.yml
index eaa951114..9ebed2bf1 100644
--- a/roles/vault/tasks/shared/check_etcd.yml
+++ b/roles/vault/tasks/shared/check_etcd.yml
@@ -2,7 +2,7 @@
- name: check_etcd | Check if etcd is up and reachable
uri:
- url: "{{ vault_etcd_url }}/health"
+ url: "{{ vault_etcd_url.split(',') | first }}/health"
validate_certs: no
client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
@@ -26,5 +26,5 @@
fail:
msg: >
Unable to start Vault cluster! Etcd is not available at
- {{ vault_etcd_url }} however it is needed by Vault as a backend.
+    {{ vault_etcd_url.split(',') | first }}, however it is needed by Vault as a backend.
when: vault_etcd_needed|d() and not vault_etcd_available
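
`vault_etcd_url` may carry a comma-separated endpoint list, which the `uri` module cannot probe directly; splitting off the first entry yields a single health URL. A worked example with assumed values:

    vault_etcd_url: "https://10.0.0.11:2379,https://10.0.0.12:2379"
    # {{ vault_etcd_url.split(',') | first }}/health
    #   -> https://10.0.0.11:2379/health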
diff --git a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml
index 1ffd515fd..999a36f32 100644
--- a/roles/vault/tasks/shared/check_vault.yml
+++ b/roles/vault/tasks/shared/check_vault.yml
@@ -9,7 +9,9 @@
# Check if vault is reachable on the localhost
- name: check_vault | Attempt to pull local https Vault health
command: /bin/true
- notify: wait for vault up nowait
+ notify:
+ - wait for vault up nowait
+ - set facts about local Vault health
- meta: flush_handlers
@@ -44,6 +46,6 @@
vault_cluster_is_initialized: >-
{{ vault_is_initialized or
hostvars[item]['vault_is_initialized'] or
- 'Key not found' not in vault_etcd_exists.stdout|default('Key not found') }}
+ ('value' in vault_etcd_exists.stdout|default('')) }}
with_items: "{{ groups.vault }}"
run_once: true
diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml
index b04cd0e23..be49f375d 100644
--- a/roles/vault/tasks/shared/issue_cert.yml
+++ b/roles/vault/tasks/shared/issue_cert.yml
@@ -45,7 +45,7 @@
state: directory
recurse: yes
owner: "vault"
- group: "vault"
+ group: "root"
mode: 0755
- name: gen_certs_vault | install hvac
@@ -76,8 +76,7 @@
run_once: true
- name: "issue_cert | Generate {{ issue_cert_path }} for {{ issue_cert_role }} role"
- #hashivault_write:
- vault_cert_issue:
+ hashivault_write:
url: "{{ issue_cert_url }}"
token: "{{ vault_client_token }}"
ca_cert: "{% if 'https' in issue_cert_url %}{{ vault_cert_dir }}/ca.pem{% endif %}"
@@ -88,6 +87,7 @@
format: "{{ issue_cert_format | d('pem') }}"
ip_sans: "{{ issue_cert_ip_sans | default([]) | join(',') }}"
register: issue_cert_result
+ run_once: "{{ issue_cert_run_once | d(false) }}"
- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
copy:
diff --git a/roles/vault/templates/host.service.j2 b/roles/vault/templates/host.service.j2
index 11bce2f29..28fac1dba 100644
--- a/roles/vault/templates/host.service.j2
+++ b/roles/vault/templates/host.service.j2
@@ -4,7 +4,7 @@ After=network.target
[Service]
AmbientCapabilities=CAP_IPC_LOCK
-ExecStart=/usr/bin/vault server --config={{ vault_config_dir }}/config.json
+ExecStart={{ bin_dir }}/vault server --config={{ vault_config_dir }}/config.json
LimitNOFILE=40000
NotifyAccess=all
Restart=always
diff --git a/roles/vault/templates/rkt.service.j2 b/roles/vault/templates/rkt.service.j2
index 6a4c3d77a..e92221161 100644
--- a/roles/vault/templates/rkt.service.j2
+++ b/roles/vault/templates/rkt.service.j2
@@ -12,26 +12,34 @@ LimitNOFILE=40000
# Container has the following internal mount points:
# /vault/file/ # File backend storage location
# /vault/logs/ # Log files
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
+
ExecStart=/usr/bin/rkt run \
---insecure-options=image \
---volume hosts,kind=host,source=/etc/hosts,readOnly=true \
---mount volume=hosts,target=/etc/hosts \
---volume=volume-vault-file,kind=host,source=/var/lib/vault \
---volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
---volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
---mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
---volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
---mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
---volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
---mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
---volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
---mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
---volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
---mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
-docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
---name={{ vault_container_name }} --net=host \
---caps-retain=CAP_IPC_LOCK \
---exec vault -- server --config={{ vault_config_dir }}/config.json
+ --insecure-options=image \
+ --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
+ --mount volume=hosts,target=/etc/hosts \
+ --volume=volume-vault-file,kind=host,source=/var/lib/vault \
+ --volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
+ --volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
+ --mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
+ --volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
+ --mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
+ --volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
+ --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
+ --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
+ --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
+ --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
+ --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
+ docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
+ --uuid-file-save=/var/run/vault.uuid \
+ --name={{ vault_container_name }} \
+ --net=host \
+ --caps-retain=CAP_IPC_LOCK \
+ --exec vault -- \
+ server \
+ --config={{ vault_config_dir }}/config.json
+
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
[Install]
WantedBy=multi-user.target
diff --git a/scale.yml b/scale.yml
index 3f8613011..a80d080b0 100644
--- a/scale.yml
+++ b/scale.yml
@@ -32,7 +32,7 @@
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
- { role: kubernetes/node, tags: node }
diff --git a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml
index a410be3f2..199fa437c 100644
--- a/tests/files/gce_centos-weave-kubeadm.yml
+++ b/tests/files/gce_centos-weave-kubeadm.yml
@@ -9,5 +9,6 @@ startup_script: ""
kube_network_plugin: weave
kubeadm_enabled: true
deploy_netchecker: true
+kubernetes_audit: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index 161625946..3dffa338f 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -7,8 +7,8 @@ mode: ha
# Deployment settings
kube_network_plugin: flannel
helm_enabled: true
-istio_enabled: true
efk_enabled: true
+kubernetes_audit: true
etcd_events_cluster_setup: true
local_volume_provisioner_enabled: true
etcd_deployment_type: host
diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml
index 9eae57e2e..e5bea621c 100644
--- a/tests/files/gce_opensuse-canal.yml
+++ b/tests/files/gce_opensuse-canal.yml
@@ -6,7 +6,6 @@ mode: default
# Deployment settings
bootstrap_os: opensuse
kube_network_plugin: canal
-kubeadm_enabled: true
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 9e858acd3..3d75883dd 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -38,7 +38,7 @@
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
environment: "{{proxy_env}}"
- hosts: etcd:k8s-cluster:vault