Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 05:45:06 +03:00)
Compare commits
69 Commits
| SHA1 |
|---|
| 563be70728 |
| a03f3739dc |
| bfe78848fa |
| 126d4e36c8 |
| 97c4edc028 |
| f74c195d47 |
| 2374878ef7 |
| b9e56dd435 |
| ede5f9592a |
| a6137b3aee |
| da3920496d |
| 895a02e274 |
| b4b20c9dbc |
| fe8eff07d3 |
| 941cae2a4c |
| 4a9a82ca86 |
| d2ac5ac54b |
| 4c2f757fe8 |
| e701c3d49d |
| 5762d8f301 |
| 9a278bae00 |
| d3f35e12a2 |
| d7b7db34fa |
| 4dd85b5078 |
| 7f73bb5522 |
| 795ce8468d |
| fb6dd60f52 |
| e427591545 |
| 9b8c89ebb0 |
| 323155b0e1 |
| f368faf66b |
| 8fa7811b63 |
| c352df6fc8 |
| 34419d6bae |
| d94bc8e599 |
| 57e1831f78 |
| 1a0208f448 |
| 5319f23e73 |
| b45261b763 |
| 10ade2cbdc |
| 471dad44b6 |
| 3f411bffe4 |
| 5cc29b77aa |
| 70aa68b9c7 |
| 7efaf30d36 |
| 0b164bec02 |
| 3f8f0f550b |
| d6a790ec46 |
| 8eef0db3ec |
| 2b3543d0ee |
| c997860e1c |
| 27b0980622 |
| 3fb9101e40 |
| 3bf74530ce |
| f6e4cc530c |
| e85fb0460e |
| f0eb963f5e |
| f216302f95 |
| b98227e9a4 |
| f27a3f047f |
| 8e585cfdfe |
| 0af0a3517f |
| 73e240c644 |
| 533fe3b8e6 |
| 95403e9d93 |
| 250ed9d56b |
| 6381e75769 |
| 71e4b185c5 |
| a3c5be2c9d |
31  .gitmodules (vendored)
@@ -1,30 +1,43 @@
[submodule "roles/apps/k8s-kube-ui"]
    path = roles/apps/k8s-kube-ui
    url = https://github.com/ansibl8s/k8s-kube-ui.git
[submodule "roles/apps/k8s-skydns"]
    path = roles/apps/k8s-skydns
    url = https://github.com/ansibl8s/k8s-skydns.git
    branch = v1.0
[submodule "roles/apps/k8s-kubedns"]
    path = roles/apps/k8s-kubedns
    url = https://github.com/ansibl8s/k8s-kubedns.git
    branch = v1.0
[submodule "roles/apps/k8s-common"]
    path = roles/apps/k8s-common
    url = https://github.com/ansibl8s/k8s-common.git
    branch = v1.0
[submodule "roles/apps/k8s-redis"]
    path = roles/apps/k8s-redis
    url = https://github.com/ansibl8s/k8s-redis.git
    branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"]
    path = roles/apps/k8s-elasticsearch
    url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"]
    path = roles/apps/k8s-fabric8
    url = https://github.com/ansibl8s/k8s-fabric8.git
    branch = v1.0
[submodule "roles/apps/k8s-memcached"]
    path = roles/apps/k8s-memcached
    url = https://github.com/ansibl8s/k8s-memcached.git
[submodule "roles/apps/k8s-haproxy"]
    path = roles/apps/k8s-haproxy
    url = https://github.com/ansibl8s/k8s-haproxy.git
    branch = v1.0
[submodule "roles/apps/k8s-postgres"]
    path = roles/apps/k8s-postgres
    url = https://github.com/ansibl8s/k8s-postgres.git
[submodule "roles/apps/k8s-kubedns"]
    path = roles/apps/k8s-kubedns
    url = https://github.com/ansibl8s/k8s-kubedns.git
    branch = v1.0
[submodule "roles/apps/k8s-kubedash"]
    path = roles/apps/k8s-kubedash
    url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
    path = roles/apps/k8s-heapster
    url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
    path = roles/apps/k8s-influxdb
    url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
    path = roles/apps/k8s-kube-logstash
    url = https://github.com/ansibl8s/k8s-kube-logstash.git
136  README.md
@@ -1,7 +1,7 @@
kubernetes-ansible
========

Install and configure a kubernetes cluster including network overlay and optionnal addons.
Install and configure a kubernetes cluster including network plugin and optionnal addons.
Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.

### Requirements

@@ -12,17 +12,17 @@ The firewalls are not managed, you'll need to implement your own rules the way y
Ansible v1.9.x

### Components
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.0.6
* [etcd](https://github.com/coreos/etcd/releases) v2.2.0
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.5.1
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.3
* [docker](https://www.docker.com/) v1.8.2
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.11.0
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
* [docker](https://www.docker.com/) v1.8.3

Ansible
-------------------------
### Download binaries
A role allows to download required binaries which will be stored in a directory defined by the variable
A role allows to download required binaries. They will be stored in a directory defined by the variable
**'local_release_dir'** (by default /tmp).
Please ensure that you have enough disk space there (about **1G**).
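For illustration only: the download location can be overridden per environment so the roughly 1G of release binaries land on a partition with enough room. A minimal sketch, assuming the group_vars layout named in the Variables section below; the directory itself is just an example:
```
# environments/[env_name]/group_vars/all.yml
local_release_dir: "/opt/kube-releases"
```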
@@ -32,6 +32,48 @@ Please ensure that you have enough disk space there (about **1G**).

### Variables
The main variables to change are located in the directory ```environments/[env_name]/group_vars/k8s-cluster.yml```.

### Inventory
Below is an example of an inventory.
Note : The bgp vars local_as and peers are not mandatory if the var **'peer_with_router'** is set to false
By default this variable is set to false and therefore all the nodes are configure in **'node-mesh'** mode.
In node-mesh mode the nodes peers with all the nodes in order to exchange routes.

```
[downloader]
10.99.0.26

[kube-master]
10.99.0.26

[etcd]
10.99.0.26

[kube-node]
10.99.0.4
10.99.0.5
10.99.0.36
10.99.0.37

[paris]
10.99.0.26
10.99.0.4 local_as=xxxxxxxx
10.99.0.5 local_as=xxxxxxxx

[usa]
10.99.0.36 local_as=xxxxxxxx
10.99.0.37 local_as=xxxxxxxx

[k8s-cluster:children]
kube-node
kube-master

[paris:vars]
peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}]

[usa:vars]
peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}]
```

### Playbook
```
---
@@ -44,14 +86,12 @@ The main variables to change are located in the directory ```environments/[env_n
  roles:
    - { role: etcd, tags: etcd }
    - { role: docker, tags: docker }
    - { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
    - { role: dnsmasq, tags: dnsmasq }

- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }
    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
    - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }

- hosts: kube-node
  roles:

@@ -69,13 +109,13 @@ Kubernetes
-------------------------

### Network Overlay
You can choose between 2 network overlays. Only one must be chosen.
You can choose between 2 network plugins. Only one must be chosen.

* **flannel**: gre/vxlan (layer 2) networking. ([official docs]('https://github.com/coreos/flannel'))

* **calico**: bgp (layer 3) networking. ([official docs]('http://docs.projectcalico.org/en/0.13/'))

The choice is defined with the variable '**overlay_network_plugin**'
The choice is defined with the variable '**kube_network_plugin**'
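For example, selecting Calico for the whole cluster is a one-line setting in the cluster group vars; this is only a sketch, based on the commented default for the same variable that appears later in this diff:
```
# environments/[env_name]/group_vars/k8s-cluster.yml
kube_network_plugin: calico    # or "flannel"
```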
### Expose a service
There are several loadbalancing solutions.
@@ -110,14 +150,20 @@ iptables -nLv -t nat
```


#### Available apps, installation procedure
### Available apps, installation procedure

There are two ways of installing new apps

#### Ansible galaxy

Additionnal apps can be installed with ```ansible-galaxy```.

you'll need to edit the file '*requirements.yml*' in order to chose needed apps.
ou'll need to edit the file '*requirements.yml*' in order to chose needed apps.
The list of available apps are available [there](https://github.com/ansibl8s)

For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
For instance it is **strongly recommanded** to install a dns server which resolves kubernetes service names.
In order to use this role you'll need the following entries in the file '*requirements.yml*'
Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.
```
- src: https://github.com/ansibl8s/k8s-common.git
  path: roles/apps
@@ -139,16 +185,34 @@ Then download the roles with ansible-galaxy
ansible-galaxy install -r requirements.yml
```

Finally update your playbook with the chosen role, and run it
#### Git submodules
Alternatively the roles can be installed as git submodules.
That way is easier if you want to do some changes and commit them.

You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```

In order to install the dns addon you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```

Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
...
- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }
    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
...
```
Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.

```
ansible-playbook -i environments/dev/inventory apps.yml -u root
```


#### Calico networking
Check if the calico-node container is running
@@ -173,38 +237,4 @@ calicoctl endpoint show --detail
```
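A minimal way to run that check by hand, as a sketch only; it assumes the Docker-based calico-node agent and the calicoctl binary installed into bin_dir by these roles:
```
docker ps | grep calico-node   # the per-node agent container should show as Up
calicoctl status               # BGP/Felix status with the calico-docker era calicoctl
```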
#### Flannel networking

Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.0/basicstutorials.html)

Known issues
-------------
### Node reboot and Calico
There is a major issue with calico-kubernetes version 0.5.1 and kubernetes prior to 1.1 :
After host reboot, the pods networking are not configured again, they are started without any network configuration.
This issue will be fixed when kubernetes 1.1 will be released as described in this [issue](https://github.com/projectcalico/calico-kubernetes/issues/34)

### Monitoring addon
Until now i didn't managed to get the monitoring addon working.

### Apiserver listen on secure port only
Currently the api-server listens on both secure and insecure ports.
The insecure port is mainly used for calico.
Will be fixed soon.

How to contribute
------------------

### Update available roles
Alternatively the roles can be installed as git submodules.
That way is easier if you want to do some changes and commit them.

You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```

For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
In order to use this role you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)
23  apps.yml (new file)
@@ -0,0 +1,23 @@
---
- hosts: kube-master
  roles:
    # System
    - { role: apps/k8s-kubedns, tags: 'kubedns' }

    # Databases
    - { role: apps/k8s-postgres, tags: 'postgres' }
    - { role: apps/k8s-elasticsearch, tags: 'es' }
    - { role: apps/k8s-memcached, tags: 'es' }
    - { role: apps/k8s-redis, tags: 'es' }

    # Monitoring
    - { role: apps/k8s-influxdb, tags: 'influxdb'}
    - { role: apps/k8s-heapster, tags: 'heapster'}
    - { role: apps/k8s-kubedash, tags: 'kubedash'}

    # logging
    - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}

    # Console
    - { role: apps/k8s-fabric8, tags: 'fabric8' }
    - { role: apps/k8s-kube-ui, tags: 'kube-ui' }
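Usage sketch: because each app role in this new playbook carries its own tag, a subset can be installed by running it with ansible-playbook's standard --tags option (command pattern taken from the README above; the tag selection is illustrative):
```
ansible-playbook -i environments/dev/inventory apps.yml -u root --tags kubedns,heapster,influxdb
```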
@@ -8,15 +8,12 @@
  roles:
    - { role: etcd, tags: etcd }
    - { role: docker, tags: docker }
    - { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
    - { role: dnsmasq, tags: dnsmasq }

- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }
    # Apps to be installed
    # - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
    # - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }

- hosts: kube-node
  roles:

@@ -1,6 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

@@ -1,57 +0,0 @@
# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
  kube:
    pass: changeme
    role: admin
  root:
    pass: changeme
    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
overlay_network_plugin: calico

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
overlay_network_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire overlay network. So the entirety of 4.0.0.0/16 must be
# unused in your environment.
# overlay_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
overlay_network_host_prefix: 24

# With calico it is possible to distributed routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router

# Internal DNS configuration.
# Kubernetes can create and mainatain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Upstream dns servers used by dnsmasq
upstream_dns_servers:
- 8.8.8.8
- 4.4.8.8

# Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"

# Ip address of the kubernetes dns service
dns_server: 10.233.0.10

@@ -1,36 +0,0 @@
[downloader]
172.16.0.1

[kube-master]
# NB : the br_addr must be in the {{ calico_pool }} subnet
# it will assign a /24 subnet per node
172.16.0.1 br_addr=10.233.64.1

[etcd]
172.16.0.1

[kube-node:children]
usa
france

[usa]
172.16.0.1 br_addr=10.233.64.1
# Configure the as assigned to the each node if bgp peering with border routers is enabled
172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx

[france]
192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx

[k8s-cluster:children]
kube-node
kube-master

# If you want to configure bgp peering with border router you'll need to set the following vars
# List of routers and their as number
#[usa:vars]
#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
#
#[france:vars]
#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]

@@ -1,4 +1,4 @@
# Directory where the binaries will be installed
# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.

@@ -9,9 +9,9 @@

# Kubernetes cluster name, also will be used as DNS domain
# cluster_name: cluster.local
#

# set this variable to calico if needed. keep it empty if flannel is used
# overlay_network_plugin: calico
# kube_network_plugin: calico

# Kubernetes internal network for services, unused block of space.
# kube_service_addresses: 10.233.0.0/18

@@ -19,23 +19,26 @@
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
# overlay_network_subnet: 10.233.64.0/18
# kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire overlay network. So the entirety of 4.0.0.0/16 must be
# unused in your environment.
# overlay_network_prefix: 18
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
# overlay_network_host_prefix: 24
# kube_network_node_prefix: 24

# With calico it is possible to distributed routes with border routers of the datacenter.
# peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router

# The port the API Server will be listening on.
# kube_master_port: 443 # (https)
# kube_master_insecure_port: 8080 # (http)

# Internal DNS configuration.
# Kubernetes can create and mainatain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
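Put together, an uncommented minimal k8s-cluster configuration built only from the defaults shown in this hunk might look like the sketch below; the values are the ones visible above, nothing new is introduced:
```
kube_network_plugin: calico
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24       # a /24 per node, i.e. 254 pod IPs per node
kube_master_port: 443              # https
kube_master_insecure_port: 8080    # http, used by calico and the proxy kubeconfig
```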
@@ -1,19 +1,19 @@
---
- src: https://github.com/ansibl8s/k8s-common.git
  path: roles/apps
  # version: v1.0
  version: v1.0

- src: https://github.com/ansibl8s/k8s-skydns.git
- src: https://github.com/ansibl8s/k8s-kubedns.git
  path: roles/apps
  # version: v1.0
  version: v1.0

#- src: https://github.com/ansibl8s/k8s-kube-ui.git
#  path: roles/apps
#  # version: v1.0
#  version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-fabric8.git
#  path: roles/apps
#  # version: v1.0
#  version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-elasticsearch.git
#  path: roles/apps

@@ -25,12 +25,17 @@
#
#- src: https://github.com/ansibl8s/k8s-memcached.git
#  path: roles/apps
#  # version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-haproxy.git
#  path: roles/apps
#  # version: v1.0
#  version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-postgres.git
#  path: roles/apps
#  # version: v1.0
#  version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-heapster.git
#  path: roles/apps
#
#- src: https://github.com/ansibl8s/k8s-influxdb.git
#  path: roles/apps
#
#- src: https://github.com/ansibl8s/k8s-kubedash.git
#  path: roles/apps
Submodule roles/apps/k8s-common updated: 1b0318421f...2ef9669d6d
Submodule roles/apps/k8s-elasticsearch updated: 2de264f007...3d74c70a4a
Submodule roles/apps/k8s-haproxy deleted from c17312c4df
1  roles/apps/k8s-heapster (submodule)
Submodule roles/apps/k8s-heapster added at dc088e25ef
1  roles/apps/k8s-influxdb (submodule)
Submodule roles/apps/k8s-influxdb added at 38d54c48e7
1  roles/apps/k8s-kube-logstash (submodule)
Submodule roles/apps/k8s-kube-logstash added at 256fa156e4
1  roles/apps/k8s-kubedash (submodule)
Submodule roles/apps/k8s-kubedash added at 64385696a9
Submodule roles/apps/k8s-kubedns updated: 382e1d8bfc...b5015aed8f
Submodule roles/apps/k8s-memcached updated: aad14ddd99...563b35f3b6
Submodule roles/apps/k8s-postgres updated: 582f8ec9f8...e219c91391
Submodule roles/apps/k8s-redis updated: 86495a2152...a4e134fef3
@@ -1,21 +1,4 @@
---
- name: Write script for calico/docker bridge configuration
  template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"

- name: Configure calico/docker bridge
  shell: /etc/network/if-up.d/create_cbr
  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"

- name: Configure docker to use cbr0 bridge
  lineinfile:
    dest=/etc/default/docker
    regexp='.*DOCKER_OPTS=.*'
    line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
  notify:
    - restart docker
  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"

- name: enable docker
  service:
    name: docker

@@ -24,10 +7,10 @@
  tags:
    - docker

- meta: flush_handlers

#- name: login to arkena's docker registry
#  shell : >
#    docker login --username={{ dockerhub_user }}
#    --password={{ dockerhub_pass }}
#    --email={{ dockerhub_email }}

- meta: flush_handlers

@@ -6,19 +6,19 @@
    - ca-certificates

- name: Configure docker apt repository
  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list backup=yes

- name: Install docker-engine
  apt: pkg={{ item }} state=present force=yes update_cache=yes
  with_items:
    - aufs-tools
    - cgroupfs-mount
    - docker-engine=1.8.2-0~{{ ansible_distribution_release }}
    - docker-engine=1.8.3-0~{{ ansible_distribution_release }}

- name: Copy default docker configuration
  template: src=default-docker.j2 dest=/etc/default/docker
  template: src=default-docker.j2 dest=/etc/default/docker backup=yes
  notify: restart docker

- name: Copy Docker systemd unit file
  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service backup=yes
  notify: restart docker

@@ -1,14 +0,0 @@
#!/bin/bash

# Create calico bridge cbr0 if it doesn't exist
ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
if ! [[ "${ifaces}" =~ "cbr0" ]];then
  brctl addbr cbr0
  ip link set cbr0 up
fi

# Configure calico bridge ip
br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
  ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
fi

@@ -4,9 +4,7 @@
#DOCKER="/usr/local/bin/docker"

# Use DOCKER_OPTS to modify the daemon startup options.
{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
{% endif %}
#DOCKER_OPTS=""

# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"

@@ -1 +1 @@
deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main
deb https://apt.dockerproject.org/repo {{ansible_distribution|lower}}-{{ ansible_distribution_release}} main
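As an illustrative rendering only: the new template line varies per distribution, so on an Ubuntu trusty host the generated /etc/apt/sources.list.d/docker.list would contain something like:
```
deb https://apt.dockerproject.org/repo ubuntu-trusty main
```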
@@ -3,3 +3,11 @@ etcd_download_url: https://github.com/coreos/etcd/releases/download
flannel_download_url: https://github.com/coreos/flannel/releases/download
kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download

etcd_version: v2.2.2
flannel_version: 0.5.5

kube_version: v1.1.2
kube_sha1: 69d110d371752c6492d2f8695aa7a47be5b6ed4e

calico_version: v0.11.0

@@ -1,8 +0,0 @@
---
etcd_version: v2.2.0
flannel_version: 0.5.3

kube_version: v1.0.6
kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c

calico_version: v0.5.1

@@ -6,7 +6,7 @@
  file: path=/etc/systemd/system/etcd2.service.d state=directory

- name: Write etcd2 config file
  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf backup=yes
  notify:
    - reload systemd
    - restart etcd2

@@ -21,4 +21,5 @@
  template:
    src: systemd-etcd2.service.j2
    dest: /lib/systemd/system/etcd2.service
    backup: yes
  notify: restart daemons

@@ -11,9 +11,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes

# The port the API Server will be listening on.
kube_master_port: 443

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"

@@ -33,9 +30,15 @@ kube_cert_group: kube-cert

dns_domain: "{{ cluster_name }}"

kube_proxy_mode: iptables

# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 10th ip address in the kube_service_addresses range and use that.
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"

# kube_api_runtime_config:
#   - extensions/v1beta1/daemonsets=true
#   - extensions/v1beta1/deployments=true
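A worked illustration only: with the kube_service_addresses default of 10.233.0.0/18 seen elsewhere in this diff, the environment files in this repository simply pin the DNS service address inside that range rather than computing it with the commented filter expression:
```
dns_server: 10.233.0.10
```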
@@ -19,7 +19,7 @@
  args:
    creates: "{{ kube_cert_dir }}/server.crt"
  environment:
    MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
    MASTER_IP: "{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}"
    MASTER_NAME: "{{ inventory_hostname }}"
    DNS_DOMAIN: "{{ dns_domain }}"
    SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"

@@ -45,6 +45,10 @@
  notify:
    - restart daemons

- debug: msg="{{groups['kube-master'][0]}} == {{inventory_hostname}}"
  tags:
    - debug

- include: gen_tokens.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]
1315  roles/kubernetes/master/files/kubectl_bash_completion.sh (new file)
File diff suppressed because it is too large.
@@ -3,30 +3,54 @@
  command: /bin/true
  notify:
    - reload systemd
    - restart apiserver
    - restart controller-manager
    - restart scheduler
    - restart proxy
    - restart reloaded-scheduler
    - restart reloaded-controller-manager
    - restart reloaded-apiserver
    - restart reloaded-proxy

- name: reload systemd
  command: systemctl daemon-reload

- name: restart apiserver
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-apiserver

- name: restart reloaded-apiserver
  service:
    name: kube-apiserver
    state: restarted

- name: restart controller-manager
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-controller-manager

- name: restart reloaded-controller-manager
  service:
    name: kube-controller-manager
    state: restarted

- name: restart scheduler
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-scheduler

- name: restart reloaded-scheduler
  service:
    name: kube-scheduler
    state: restarted

- name: restart proxy
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-proxy

- name: restart reloaded-proxy
  service:
    name: kube-proxy
    state: restarted
@@ -18,56 +18,57 @@
  proxy_token: "{{ tokens.results[3].content|b64decode }}"

- name: write the config files for api server
  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes
  notify:
    - restart daemons
    - restart apiserver

- name: write config file for controller-manager
  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes
  notify:
    - restart controller-manager

- name: write the kubecfg (auth) file for controller-manager
  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig backup=yes
  notify:
    - restart controller-manager

- name: write the config file for scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler backup=yes
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for scheduler
  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig backup=yes
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for kubectl
  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig backup=yes

- name: write the config files for proxy
  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
- name: Copy kubectl bash completion
  copy: src=kubectl_bash_completion.sh dest=/etc/bash_completion.d/kubectl.sh

- name: Create proxy environment vars dir
  file: path=/etc/systemd/system/kube-proxy.service.d state=directory

- name: Write proxy config file
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
  notify:
    - restart daemons
    - restart proxy

- name: write the kubecfg (auth) file for proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes

- name: populate users for basic auth in API
  lineinfile:
    dest: "{{ kube_users_dir }}/known_users.csv"
    create: yes
    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
    backup: yes
  with_dict: "{{ kube_users }}"
  notify:
    - restart apiserver

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started

- name: Enable controller-manager
  service:
    name: kube-controller-manager

@@ -85,3 +86,9 @@
    name: kube-proxy
    enabled: yes
    state: started

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started
@@ -1,19 +1,19 @@
---
- name: Write kube-apiserver systemd init file
  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
  notify: restart daemons
  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes
  notify: restart apiserver

- name: Write kube-controller-manager systemd init file
  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
  notify: restart daemons
  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes
  notify: restart controller-manager

- name: Write kube-scheduler systemd init file
  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
  notify: restart daemons
  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes
  notify: restart scheduler

- name: Write kube-proxy systemd init file
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
  notify: restart daemons
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
  notify: restart proxy

- name: Install kubernetes binaries
  copy:

@@ -8,7 +8,7 @@
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
KUBE_API_PORT="--insecure-port={{kube_master_insecure_port}} --secure-port={{ kube_master_port }}"

# KUBELET_PORT="--kubelet_port=10250"

@@ -21,5 +21,8 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"

# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"

# Add you own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"

@@ -4,4 +4,5 @@
# default config should be adequate

# Add your own!
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
[Service]
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"

@@ -10,7 +10,7 @@ contexts:
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['kube-master'][0] }}:8080
    server: http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}
  name: {{ cluster_name }}
users:
- name: proxy

@@ -19,6 +19,7 @@ ExecStart={{ bin_dir }}/kube-apiserver \
    $KUBE_ALLOW_PRIV \
    $KUBE_SERVICE_ADDRESSES \
    $KUBE_ADMISSION_CONTROL \
    $KUBE_RUNTIME_CONFIG \
    $KUBE_API_ARGS
Restart=on-failure
Type=notify

@@ -1,13 +1,14 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}

[Service]
EnvironmentFile=/etc/kubernetes/config
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
    $KUBE_LOGTOSTDERR \
@@ -2,18 +2,31 @@
- name: restart daemons
  command: /bin/true
  notify:
    - restart kubelet
    - restart proxy
    - reload systemd
    - restart reloaded-kubelet
    - restart reloaded-proxy

- name: reload systemd
  command: systemctl daemon-reload

- name: restart kubelet
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-kubelet

- name: restart reloaded-kubelet
  service:
    name: kubelet
    state: restarted

- name: restart proxy
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-proxy

- name: restart reloaded-proxy
  service:
    name: kube-proxy
    state: restarted

- name: reload systemd
  command: systemctl daemon-reload

@@ -18,13 +18,12 @@
  file: path=/etc/systemd/system/kubelet.service.d state=directory

- name: Write kubelet config file
  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes
  notify:
    - reload systemd
    - restart kubelet

- name: write the kubecfg (auth) file for kubelet
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig backup=yes
  notify:
    - restart kubelet

@@ -32,13 +31,12 @@
  file: path=/etc/systemd/system/kube-proxy.service.d state=directory

- name: Write proxy config file
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
  notify:
    - reload systemd
    - restart proxy

- name: write the kubecfg (auth) file for kube-proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
  notify:
    - restart proxy

@@ -1,10 +1,10 @@
---
- name: Write kube-proxy systemd init file
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
  notify: restart daemons

- name: Write kubelet systemd init file
  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
  notify: restart daemons

- name: Install kubernetes binaries

@@ -16,6 +16,6 @@ Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns
{% else %}
Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
{% endif %}
{% if overlay_network_plugin|default('') %}
Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ overlay_network_plugin }}"
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ kube_network_plugin }}"
{% endif %}

@@ -5,7 +5,7 @@ preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['kube-master'][0] }}:443
    server: https://{{ groups['kube-master'][0] }}:{{kube_master_port}}
  name: {{ cluster_name }}
contexts:
- context:

@@ -3,4 +3,4 @@

# default config should be adequate
[Service]
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"

@@ -1,13 +1,14 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}

[Service]
EnvironmentFile=/etc/kubernetes/config
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
    $KUBE_LOGTOSTDERR \

@@ -1,14 +1,14 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}

[Service]
#WorkingDirectory=/var/lib/kubelet
EnvironmentFile=/etc/kubernetes/config
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kubelet \
    $KUBE_LOGTOSTDERR \
@@ -11,36 +11,36 @@

- name: Calico | Write calico-node systemd init file
  template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
  register: newservice
  notify:
    - reload systemd
    - restart calico-node

- name: Calico | Write network-environment
  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
  notify:
    - reload systemd
    - restart calico-node
- name: Calico | daemon-reload
  command: systemctl daemon-reload
  when: newservice|changed
  changed_when: False

- name: Calico | Enable calico-node
  service: name=calico-node enabled=yes state=started

- name: Calico | Configure calico-node desired pool
  shell: calicoctl pool add {{ kube_pods_subnet }}
  environment:
    ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
  run_once: true

- name: Calico | Configure calico-node remove default pool
  shell: calicoctl pool remove 192.168.0.0/16
  environment:
    ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
  run_once: true

- name: Calico | Configure calico-node desired pool
  shell: calicoctl pool add {{ overlay_network_subnet }}
  environment:
    ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
  run_once: true

- name: Calico | Disable node mesh
  shell: calicoctl bgp node-mesh off
  when: peer_with_router and inventory_hostname in groups['kube-node']
  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']

- name: Calico | Configure peering with router(s)
  shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
  with_items: peers
  when: peer_with_router and inventory_hostname in groups['kube-node']
  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
17  roles/network_plugin/tasks/main.yml (new file)
@@ -0,0 +1,17 @@
---
- name: "Test if network plugin is defined"
  fail: msg="ERROR, One network_plugin variable must be defined (Flannel or Calico)"
  when: ( kube_network_plugin is defined and kube_network_plugin == "calico" and kube_network_plugin == "flannel" ) or
        kube_network_plugin is not defined

- include: flannel.yml
  when: kube_network_plugin == "flannel"

- name: Calico | Write network-environment
  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
  when: kube_network_plugin == "calico"

- include: calico.yml
  when: kube_network_plugin == "calico"

- meta: flush_handlers
20  roles/network_plugin/templates/calico/calico-node.service.j2 (new file)
@@ -0,0 +1,20 @@
[Unit]
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
Requires=docker.service
After=docker.service etcd2.service

[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
{% if inventory_hostname in groups['kube-node'] and peer_with_router|default(false)%}
ExecStart={{ bin_dir }}/calicoctl node --kubernetes --ip=${DEFAULT_IPV4} --as={{ local_as }} --detach=false
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --kubernetes --ip=${DEFAULT_IPV4} --detach=false
{% endif %}
Restart=always
Restart=10

[Install]
WantedBy=multi-user.target

@@ -1,7 +1,7 @@
#! /usr/bin/bash
# This node's IPv4 address
CALICO_IPAM=true
DEFAULT_IPV4={{ ansible_default_ipv4.address }}
DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}

{% if inventory_hostname in groups['kube-node'] %}
# The kubernetes master IP
@@ -12,7 +12,7 @@ KUBERNETES_MASTER={{ groups['kube-master'][0] }}
ETCD_AUTHORITY={{ groups['kube-master'][0] }}:4001

# The kubernetes-apiserver location - used by the calico plugin
KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:8080/api/v1/
KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}/api/v1/

# Location of the calicoctl binary - used by the calico plugin
CALICOCTL_PATH="{{ bin_dir }}/calicoctl"

@@ -0,0 +1 @@
{ "Network": "{{ kube_service_addresses }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "vxlan" } }
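For illustration: with the commented defaults from the group vars earlier in this diff (kube_service_addresses 10.233.0.0/18, kube_network_node_prefix 24), that flannel template would render roughly as:
```
{ "Network": "10.233.0.0/18", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
```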
@@ -1,13 +0,0 @@
---
- name: "Test if overlay network is defined"
  fail: msg="ERROR, One overlay_network variable must be defined (Flannel or Calico)"
  when: ( overlay_network_plugin is defined and overlay_network_plugin == "calico" and overlay_network_plugin == "flannel" ) or
        overlay_network_plugin is not defined

- include: flannel.yml
  when: overlay_network_plugin == "flannel"
- include: calico.yml
  when: overlay_network_plugin == "calico"

- meta: flush_handlers

@@ -1,23 +0,0 @@
[Unit]
Description=calicoctl node
After=etcd2.service

[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
{% if inventory_hostname in groups['kube-node'] %}
{% if peer_with_router %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --as={{ local_as }} --kubernetes
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
{% endif %}
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
{% endif %}
RemainAfterExit=yes
Type=oneshot

[Install]
WantedBy=multi-user.target

@@ -1 +0,0 @@
{ "Network": "{{ kube_service_addresses }}", "SubnetLen": {{ overlay_network_host_prefix }}, "Backend": { "Type": "vxlan" } }