Compare commits


69 commits (v1.0...v1.1)

Author | SHA1 | Message | Date
Smaine Kahlouch | 563be70728 | disable bgp for master | 2015-12-03 15:38:44 +01:00
Smaine Kahlouch | a03f3739dc | Add kubectl bash completion, missing script | 2015-12-01 15:45:31 +01:00
Smaine Kahlouch | bfe78848fa | Add kubectl bash completion | 2015-12-01 12:13:22 +01:00
Smaine Kahlouch | 126d4e36c8 | Fix kube-proxy on master | 2015-11-30 16:41:22 +01:00
Smaine Kahlouch | 97c4edc028 | Add api runtime config option, review kubernetes handlers | 2015-11-27 12:32:31 +01:00
Smaine Kahlouch | f74c195d47 | updated submodule postgres | 2015-11-26 14:16:49 +01:00
Smaine Kahlouch | 2374878ef7 | Useless tag 'apps' | 2015-11-26 09:37:39 +01:00
Smaine Kahlouch | b9e56dd435 | Update postgres submodule | 2015-11-26 09:34:37 +01:00
ant31 | ede5f9592a | Add kube-logstash submodule | 2015-11-25 14:49:20 +01:00
ant31 | a6137b3aee | kube-logstash | 2015-11-25 14:47:05 +01:00
Smaine Kahlouch | da3920496d | add missing vars file | 2015-11-24 16:55:53 +01:00
Smaine Kahlouch | 895a02e274 | change calico pool configuration order | 2015-11-22 22:32:45 +01:00
Smaine Kahlouch | b4b20c9dbc | Update readme, inventory ex | 2015-11-22 18:25:36 +01:00
Smaine Kahlouch | fe8eff07d3 | finalize merge kube_1.1.2 | 2015-11-22 18:15:45 +01:00
Smaine Kahlouch | 941cae2a4c | README update, 1 distinct playbook for apps | 2015-11-22 18:07:52 +01:00
Smaine Kahlouch | 4a9a82ca86 | include kubernetes config | 2015-11-22 18:04:50 +01:00
Smaine Kahlouch | d2ac5ac54b | Update requirements.yml file | 2015-11-22 18:01:25 +01:00
Smaine Kahlouch | 4c2f757fe8 | Add kubedash and monitoring submodule | 2015-11-22 18:01:25 +01:00
Smaine Kahlouch | e701c3d49d | Update README with the current calico version | 2015-11-22 13:37:27 +01:00
Smaine Kahlouch | 5762d8f301 | upgrade flannel and etcd version | 2015-11-22 13:35:00 +01:00
Smaine Kahlouch | 9a278bae00 | Update README with the latest version and simply inventory | 2015-11-22 13:34:29 +01:00
Smaine Kahlouch | d3f35e12a2 | Simplify docker role, cbr0 for calico isn't required anymore | 2015-11-22 13:33:13 +01:00
Smaine Kahlouch | d7b7db34fa | move task service kube-api to the end of role master | 2015-11-21 17:01:43 +01:00
Smaine Kahlouch | 4dd85b5078 | move task service kube-api to the end of role master | 2015-11-21 17:00:41 +01:00
Antoine Legrand | 7f73bb5522 | Keep workaround | 2015-11-21 14:04:42 +01:00
Smaine Kahlouch | 795ce8468d | Calico systemd unit improvement (status, stop) | 2015-11-21 13:20:39 +01:00
ant31 | fb6dd60f52 | Rollback 1.8.3 docker | 2015-11-20 16:49:02 +01:00
Smaine Kahlouch | e427591545 | upgrade kubernetes version to 1.1.2 | 2015-11-20 16:48:50 +01:00
ant31 | 9b8c89ebb0 | Simplify inventory | 2015-11-20 14:31:49 +01:00
ant31 | 323155b0e1 | Fix docker | 2015-11-20 14:04:13 +01:00
ant31 | f368faf66b | Remove --kube-plugin-version | 2015-11-20 11:56:16 +01:00
ant31 | 8fa7811b63 | Remove workaround | 2015-11-20 11:36:32 +01:00
ant31 | c352df6fc8 | Add Backup | 2015-11-20 11:18:37 +01:00
Smaine Kahlouch | 34419d6bae | README update, 1 distinct playbook for apps | 2015-11-20 11:01:50 +01:00
Smaine Kahlouch | d94bc8e599 | Merge pull request #13 from ansibl8s/separate_apps_playbook (Separate apps deploy from cluster deploy) | 2015-11-20 10:54:46 +01:00
Antoine Legrand | 57e1831f78 | Update calico to 0.11.0 | 2015-11-20 10:38:39 +01:00
ant31 | 1a0208f448 | Separate apps deploy from cluster deploy | 2015-11-19 22:49:02 +01:00
Smaine Kahlouch | 5319f23e73 | include kubernetes config | 2015-11-18 22:36:56 +01:00
Smaine Kahlouch | b45261b763 | remove duplicate task | 2015-11-18 21:38:27 +01:00
Smaine Kahlouch | 10ade2cbdc | Update requirements.yml file | 2015-11-18 16:00:47 +01:00
Smaine Kahlouch | 471dad44b6 | Add kubedash and monitoring submodule | 2015-11-18 15:56:13 +01:00
Smaine Kahlouch | 3f411bffe4 | include config file into systemd unit file | 2015-11-16 22:22:19 +01:00
Smaine Kahlouch | 5cc29b77aa | add option proxy mode iptables for better performances | 2015-11-16 22:21:17 +01:00
Smaine Kahlouch | 70aa68b9c7 | move task network-environment | 2015-11-16 22:20:41 +01:00
Smaine Kahlouch | 7efaf30d36 | update calico-node command line for version 0.10.0 | 2015-11-16 22:19:19 +01:00
Smaine Kahlouch | 0b164bec02 | add option proxy mode iptables for better performances | 2015-11-16 22:17:21 +01:00
Smaine Kahlouch | 3f8f0f550b | remove duplicate task | 2015-11-16 22:16:36 +01:00
Smaine Kahlouch | d6a790ec46 | default docker template condition | 2015-11-16 22:15:43 +01:00
Smaine Kahlouch | 8eef0db3ec | upgrade binaries version | 2015-11-16 22:15:12 +01:00
Smaine Kahlouch | 2b3543d0ee | Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes | 2015-11-02 13:46:23 +01:00
Smaine Kahlouch | c997860e1c | move vars for api socket into group_vars | 2015-11-02 13:46:08 +01:00
Smaine Kahlouch | 27b0980622 | Merge pull request #11 from ansibl8s/replace_default_ipv4_by_var (Add IP var) | 2015-11-02 13:41:55 +01:00
Smaine Kahlouch | 3fb9101e40 | default value for 'peer_with_router' | 2015-11-02 13:41:03 +01:00
ant31 | 3bf74530ce | Add IP var | 2015-11-01 11:12:12 +01:00
Smaine Kahlouch | f6e4cc530c | manage default value for 'peer_with_router' var | 2015-10-30 16:18:39 +01:00
Smaine Kahlouch | e85fb0460e | change docker version in the README | 2015-10-28 10:49:09 +01:00
Smaine Kahlouch | f0eb963f5e | Tag v1.0 of redis | 2015-10-28 10:44:38 +01:00
Smaine Kahlouch | f216302f95 | Calico is not a network overlay | 2015-10-27 15:49:07 +01:00
Smaine Kahlouch | b98227e9a4 | update submodules postgres and kubedns with changes | 2015-10-23 16:39:15 +02:00
Smaine Kahlouch | f27a3f047f | Update playbook example on README | 2015-10-23 16:38:09 +02:00
Smaine Kahlouch | 8e585cfdfe | agencing vars into submodules | 2015-10-23 09:54:44 +02:00
Smaine Kahlouch | 0af0a3517f | Running apps after cluster setup, update README | 2015-10-21 14:05:02 +02:00
Smaine Kahlouch | 73e240c644 | Running apps after cluster setup | 2015-10-21 14:03:39 +02:00
Smaine Kahlouch | 533fe3b8e6 | Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes | 2015-10-20 10:19:06 +02:00
Smaine Kahlouch | 95403e9d93 | Update README | 2015-10-20 10:18:30 +02:00
Smaine Kahlouch | 250ed9d56b | change skydns to kubedns in the requirements | 2015-10-19 14:40:16 +02:00
Smaine Kahlouch | 6381e75769 | move k8s-postgres tag | 2015-10-19 11:11:40 +02:00
Smaine Kahlouch | 71e4b185c5 | duplicate kubedns in .gitmodules | 2015-10-18 22:38:14 +02:00
Smaine Kahlouch | a3c5be2c9d | tag first version of apps | 2015-10-18 22:32:33 +02:00
63 changed files with 1671 additions and 356 deletions

.gitmodules (31 changed lines)

@@ -1,30 +1,43 @@
[submodule "roles/apps/k8s-kube-ui"] [submodule "roles/apps/k8s-kube-ui"]
path = roles/apps/k8s-kube-ui path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git url = https://github.com/ansibl8s/k8s-kube-ui.git
[submodule "roles/apps/k8s-skydns"] branch = v1.0
path = roles/apps/k8s-skydns [submodule "roles/apps/k8s-kubedns"]
url = https://github.com/ansibl8s/k8s-skydns.git path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
[submodule "roles/apps/k8s-common"] [submodule "roles/apps/k8s-common"]
path = roles/apps/k8s-common path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
[submodule "roles/apps/k8s-redis"] [submodule "roles/apps/k8s-redis"]
path = roles/apps/k8s-redis path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"] [submodule "roles/apps/k8s-elasticsearch"]
path = roles/apps/k8s-elasticsearch path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"] [submodule "roles/apps/k8s-fabric8"]
path = roles/apps/k8s-fabric8 path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
[submodule "roles/apps/k8s-memcached"] [submodule "roles/apps/k8s-memcached"]
path = roles/apps/k8s-memcached path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git url = https://github.com/ansibl8s/k8s-memcached.git
[submodule "roles/apps/k8s-haproxy"] branch = v1.0
path = roles/apps/k8s-haproxy
url = https://github.com/ansibl8s/k8s-haproxy.git
[submodule "roles/apps/k8s-postgres"] [submodule "roles/apps/k8s-postgres"]
path = roles/apps/k8s-postgres path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git url = https://github.com/ansibl8s/k8s-postgres.git
[submodule "roles/apps/k8s-kubedns"] branch = v1.0
path = roles/apps/k8s-kubedns [submodule "roles/apps/k8s-kubedash"]
url = https://github.com/ansibl8s/k8s-kubedns.git path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git
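Note on the change above: the new `branch = v1.0` entries pin most app submodules to a release branch. A quick sketch of how to inspect and fetch them with plain git, run from the repository root:
```
# list which submodules are pinned to a branch
git config -f .gitmodules --get-regexp 'submodule\..*\.branch'
# initialize and fetch the app submodules at their recorded commits
git submodule update --init
```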

README.md (136 changed lines)

@@ -1,7 +1,7 @@
 kubernetes-ansible
 ========
-Install and configure a kubernetes cluster including network overlay and optionnal addons.
+Install and configure a kubernetes cluster including network plugin and optionnal addons.
 Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.
 ### Requirements
@@ -12,17 +12,17 @@ The firewalls are not managed, you'll need to implement your own rules the way y
 Ansible v1.9.x
 ### Components
-* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.0.6
-* [etcd](https://github.com/coreos/etcd/releases) v2.2.0
-* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.5.1
-* [flanneld](https://github.com/coreos/flannel/releases) v0.5.3
-* [docker](https://www.docker.com/) v1.8.2
+* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2
+* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
+* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.11.0
+* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
+* [docker](https://www.docker.com/) v1.8.3
 Ansible
 -------------------------
 ### Download binaries
-A role allows to download required binaries which will be stored in a directory defined by the variable
+A role allows to download required binaries. They will be stored in a directory defined by the variable
 **'local_release_dir'** (by default /tmp).
 Please ensure that you have enough disk space there (about **1G**).
@@ -32,6 +32,48 @@ Please ensure that you have enough disk space there (about **1G**).
 ### Variables
 The main variables to change are located in the directory ```environments/[env_name]/group_vars/k8s-cluster.yml```.
+### Inventory
+Below is an example of an inventory.
+Note : The bgp vars local_as and peers are not mandatory if the var **'peer_with_router'** is set to false
+By default this variable is set to false and therefore all the nodes are configure in **'node-mesh'** mode.
+In node-mesh mode the nodes peers with all the nodes in order to exchange routes.
+```
+[downloader]
+10.99.0.26
+[kube-master]
+10.99.0.26
+[etcd]
+10.99.0.26
+[kube-node]
+10.99.0.4
+10.99.0.5
+10.99.0.36
+10.99.0.37
+[paris]
+10.99.0.26
+10.99.0.4 local_as=xxxxxxxx
+10.99.0.5 local_as=xxxxxxxx
+[usa]
+10.99.0.36 local_as=xxxxxxxx
+10.99.0.37 local_as=xxxxxxxx
+[k8s-cluster:children]
+kube-node
+kube-master
+[paris:vars]
+peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}]
+[usa:vars]
+peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}]
+```
 ### Playbook
 ```
 ---
@@ -44,14 +86,12 @@ The main variables to change are located in the directory ```environments/[env_n
   roles:
     - { role: etcd, tags: etcd }
     - { role: docker, tags: docker }
-    - { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
+    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
     - { role: dnsmasq, tags: dnsmasq }
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
-    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
-    - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
 - hosts: kube-node
   roles:
@@ -69,13 +109,13 @@ Kubernetes
 -------------------------
 ### Network Overlay
-You can choose between 2 network overlays. Only one must be chosen.
+You can choose between 2 network plugins. Only one must be chosen.
 * **flannel**: gre/vxlan (layer 2) networking. ([official docs]('https://github.com/coreos/flannel'))
 * **calico**: bgp (layer 3) networking. ([official docs]('http://docs.projectcalico.org/en/0.13/'))
-The choice is defined with the variable '**overlay_network_plugin**'
+The choice is defined with the variable '**kube_network_plugin**'
 ### Expose a service
 There are several loadbalancing solutions.
@@ -110,14 +150,20 @@ iptables -nLv -t nat
 ```
-#### Available apps, installation procedure
+### Available apps, installation procedure
+There are two ways of installing new apps
+#### Ansible galaxy
 Additionnal apps can be installed with ```ansible-galaxy```.
-you'll need to edit the file '*requirements.yml*' in order to chose needed apps.
+ou'll need to edit the file '*requirements.yml*' in order to chose needed apps.
 The list of available apps are available [there](https://github.com/ansibl8s)
-For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
+For instance it is **strongly recommanded** to install a dns server which resolves kubernetes service names.
 In order to use this role you'll need the following entries in the file '*requirements.yml*'
+Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.
 ```
 - src: https://github.com/ansibl8s/k8s-common.git
   path: roles/apps
@@ -139,16 +185,34 @@ Then download the roles with ansible-galaxy
 ansible-galaxy install -r requirements.yml
 ```
-Finally update your playbook with the chosen role, and run it
+#### Git submodules
+Alternatively the roles can be installed as git submodules.
+That way is easier if you want to do some changes and commit them.
+You can list available submodules with the following command:
+```
+grep path .gitmodules | sed 's/.*= //'
+```
+In order to install the dns addon you'll need to follow these steps
+```
+git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
+git submodule update
+```
+Finally update the playbook ```apps.yml``` with the chosen roles, and run it
 ```
 ...
 - hosts: kube-master
   roles:
-    - { role: kubernetes/master, tags: master }
     - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
 ...
 ```
-Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.
+```
+ansible-playbook -i environments/dev/inventory apps.yml -u root
+```
 #### Calico networking
 Check if the calico-node container is running
@@ -173,38 +237,4 @@ calicoctl endpoint show --detail
 ```
 #### Flannel networking
-Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.0/basicstutorials.html)
+Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)
-Known issues
--------------
-### Node reboot and Calico
-There is a major issue with calico-kubernetes version 0.5.1 and kubernetes prior to 1.1 :
-After host reboot, the pods networking are not configured again, they are started without any network configuration.
-This issue will be fixed when kubernetes 1.1 will be released as described in this [issue](https://github.com/projectcalico/calico-kubernetes/issues/34)
-### Monitoring addon
-Until now i didn't managed to get the monitoring addon working.
-### Apiserver listen on secure port only
-Currently the api-server listens on both secure and insecure ports.
-The insecure port is mainly used for calico.
-Will be fixed soon.
-How to contribute
-------------------
-### Update available roles
-Alternatively the roles can be installed as git submodules.
-That way is easier if you want to do some changes and commit them.
-You can list available submodules with the following command:
-```
-grep path .gitmodules | sed 's/.*= //'
-```
-For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
-In order to use this role you'll need to follow these steps
-```
-git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
-git submodule update
-```
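With apps split out of the main play, a full deployment now takes two runs. A sketch assuming the inventory path used in the README; the cluster playbook's file name is not shown in this compare, so `cluster.yml` is an assumed name:
```
# deploy the cluster itself (playbook name assumed)
ansible-playbook -i environments/dev/inventory cluster.yml -u root
# then deploy the chosen apps (command taken from the README above)
ansible-playbook -i environments/dev/inventory apps.yml -u root
```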

apps.yml (new file, 23 lines)

@@ -0,0 +1,23 @@
---
- hosts: kube-master
  roles:
    # System
    - { role: apps/k8s-kubedns, tags: 'kubedns' }
    # Databases
    - { role: apps/k8s-postgres, tags: 'postgres' }
    - { role: apps/k8s-elasticsearch, tags: 'es' }
    - { role: apps/k8s-memcached, tags: 'es' }
    - { role: apps/k8s-redis, tags: 'es' }
    # Monitoring
    - { role: apps/k8s-influxdb, tags: 'influxdb'}
    - { role: apps/k8s-heapster, tags: 'heapster'}
    - { role: apps/k8s-kubedash, tags: 'kubedash'}
    # logging
    - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}
    # Console
    - { role: apps/k8s-fabric8, tags: 'fabric8' }
    - { role: apps/k8s-kube-ui, tags: 'kube-ui' }
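Since every role in apps.yml carries a tag, subsets can be deployed with `--tags`. A sketch reusing the README's inventory path:
```
# install only the DNS and dashboard apps; note that memcached and redis
# are both tagged 'es' in this file, so --tags es would pull three roles
ansible-playbook -i environments/dev/inventory apps.yml -u root --tags "kubedns,kubedash"
```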

(file name not shown: main cluster playbook)

@@ -8,15 +8,12 @@
   roles:
     - { role: etcd, tags: etcd }
     - { role: docker, tags: docker }
-    - { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
+    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
     - { role: dnsmasq, tags: dnsmasq }
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
-    # Apps to be installed
-    # - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
-    # - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
 - hosts: kube-node
   roles:

(file name not shown: deleted vars file with bin_dir and local_release_dir)

@@ -1,6 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

(file name not shown: deleted group_vars file with the old overlay_network_* defaults)

@@ -1,57 +0,0 @@
# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
  kube:
    pass: changeme
    role: admin
  root:
    pass: changeme
    role: admin
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
overlay_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
overlay_network_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire overlay network. So the entirety of 4.0.0.0/16 must be
# unused in your environment.
# overlay_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
overlay_network_host_prefix: 24
# With calico it is possible to distributed routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router
# Internal DNS configuration.
# Kubernetes can create and mainatain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq
upstream_dns_servers:
- 8.8.8.8
- 4.4.8.8
# Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
# Ip address of the kubernetes dns service
dns_server: 10.233.0.10

(file name not shown: deleted inventory example)

@@ -1,36 +0,0 @@
[downloader]
172.16.0.1
[kube-master]
# NB : the br_addr must be in the {{ calico_pool }} subnet
# it will assign a /24 subnet per node
172.16.0.1 br_addr=10.233.64.1
[etcd]
172.16.0.1
[kube-node:children]
usa
france
[usa]
172.16.0.1 br_addr=10.233.64.1
# Configure the as assigned to the each node if bgp peering with border routers is enabled
172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
[france]
192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
[k8s-cluster:children]
kube-node
kube-master
# If you want to configure bgp peering with border router you'll need to set the following vars
# List of routers and their as number
#[usa:vars]
#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
#
#[france:vars]
#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]

(file name not shown: sample group_vars file)

@@ -9,9 +9,9 @@
 # Kubernetes cluster name, also will be used as DNS domain
 # cluster_name: cluster.local
 #
 # set this variable to calico if needed. keep it empty if flannel is used
-# overlay_network_plugin: calico
+# kube_network_plugin: calico
 # Kubernetes internal network for services, unused block of space.
 # kube_service_addresses: 10.233.0.0/18
@@ -19,23 +19,26 @@
 # internal network. When used, it will assign IP
 # addresses from this range to individual pods.
 # This network must be unused in your network infrastructure!
-# overlay_network_subnet: 10.233.64.0/18
+# kube_pods_subnet: 10.233.64.0/18
 # internal network total size (optional). This is the prefix of the
-# entire overlay network. So the entirety of 4.0.0.0/16 must be
-# unused in your environment.
-# overlay_network_prefix: 18
+# entire network. Must be unused in your environment.
+# kube_network_prefix: 18
 # internal network node size allocation (optional). This is the size allocated
 # to each node on your network. With these defaults you should have
 # room for 4096 nodes with 254 pods per node.
-# overlay_network_host_prefix: 24
+# kube_network_node_prefix: 24
 # With calico it is possible to distributed routes with border routers of the datacenter.
 # peer_with_router: false
 # Warning : enabling router peering will disable calico's default behavior ('node mesh').
 # The subnets of each nodes will be distributed by the datacenter router
+# The port the API Server will be listening on.
+# kube_master_port: 443 # (https)
+# kube_master_insecure_port: 8080 # (http)
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
 # into appropriate IP addresses. It's highly advisable to run such DNS server,

(file name not shown: requirements.yml)

@@ -1,19 +1,19 @@
 ---
 - src: https://github.com/ansibl8s/k8s-common.git
   path: roles/apps
-  # version: v1.0
+  version: v1.0
-- src: https://github.com/ansibl8s/k8s-skydns.git
+- src: https://github.com/ansibl8s/k8s-kubedns.git
   path: roles/apps
-  # version: v1.0
+  version: v1.0
 #- src: https://github.com/ansibl8s/k8s-kube-ui.git
 #  path: roles/apps
-#  # version: v1.0
+#  version: v1.0
 #
 #- src: https://github.com/ansibl8s/k8s-fabric8.git
 #  path: roles/apps
-#  # version: v1.0
+#  version: v1.0
 #
 #- src: https://github.com/ansibl8s/k8s-elasticsearch.git
 #  path: roles/apps
@@ -25,12 +25,17 @@
 #
 #- src: https://github.com/ansibl8s/k8s-memcached.git
 #  path: roles/apps
-#  # version: v1.0
-#
-#- src: https://github.com/ansibl8s/k8s-haproxy.git
-#  path: roles/apps
-#  # version: v1.0
+#  version: v1.0
 #
 #- src: https://github.com/ansibl8s/k8s-postgres.git
 #  path: roles/apps
-#  # version: v1.0
+#  version: v1.0
+#
+#- src: https://github.com/ansibl8s/k8s-heapster.git
+#  path: roles/apps
+#
+#- src: https://github.com/ansibl8s/k8s-influxdb.git
+#  path: roles/apps
+#
+#- src: https://github.com/ansibl8s/k8s-kubedash.git
+#  path: roles/apps
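The two uncommented entries are consumed by ansible-galaxy exactly as the README shows:
```
# fetch k8s-common and k8s-kubedns at their pinned v1.0 versions
ansible-galaxy install -r requirements.yml
```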

(file name not shown: docker role tasks)

@@ -1,21 +1,4 @@
 ---
-- name: Write script for calico/docker bridge configuration
-  template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
-  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
-- name: Configure calico/docker bridge
-  shell: /etc/network/if-up.d/create_cbr
-  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
-- name: Configure docker to use cbr0 bridge
-  lineinfile:
-    dest=/etc/default/docker
-    regexp='.*DOCKER_OPTS=.*'
-    line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
-  notify:
-    - restart docker
-  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
 - name: enable docker
   service:
     name: docker
@@ -24,10 +7,10 @@
   tags:
     - docker
-- meta: flush_handlers
 #- name: login to arkena's docker registry
 #  shell : >
 #    docker login --username={{ dockerhub_user }}
 #    --password={{ dockerhub_pass }}
 #    --email={{ dockerhub_email }}
+- meta: flush_handlers

(file name not shown: docker role install tasks)

@@ -6,19 +6,19 @@
     - ca-certificates
 - name: Configure docker apt repository
-  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
+  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list backup=yes
 - name: Install docker-engine
   apt: pkg={{ item }} state=present force=yes update_cache=yes
   with_items:
     - aufs-tools
     - cgroupfs-mount
-    - docker-engine=1.8.2-0~{{ ansible_distribution_release }}
+    - docker-engine=1.8.3-0~{{ ansible_distribution_release }}
 - name: Copy default docker configuration
-  template: src=default-docker.j2 dest=/etc/default/docker
+  template: src=default-docker.j2 dest=/etc/default/docker backup=yes
   notify: restart docker
 - name: Copy Docker systemd unit file
-  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
+  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service backup=yes
   notify: restart docker
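The version pin above only installs if that exact build exists in the Docker apt repository for the target release. A sketch for checking by hand, with `jessie` standing in for `ansible_distribution_release`:
```
apt-cache policy docker-engine                    # list versions apt can see
apt-get install -y docker-engine=1.8.3-0~jessie   # the pinned build
```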

(file name not shown: deleted create_cbr.j2 template)

@@ -1,14 +0,0 @@
#!/bin/bash
# Create calico bridge cbr0 if it doesn't exist
ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
if ! [[ "${ifaces}" =~ "cbr0" ]];then
brctl addbr cbr0
ip link set cbr0 up
fi
# Configure calico bridge ip
br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
fi

(file name not shown: default-docker.j2 template)

@@ -4,9 +4,7 @@
 #DOCKER="/usr/local/bin/docker"
 # Use DOCKER_OPTS to modify the daemon startup options.
-{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
-DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
-{% endif %}
+#DOCKER_OPTS=""
 # If you need Docker to use an HTTP proxy, it can also be specified here.
 #export http_proxy="http://127.0.0.1:3128/"

(file name not shown: docker.list.j2 template)

@@ -1 +1 @@
-deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main
+deb https://apt.dockerproject.org/repo {{ansible_distribution|lower}}-{{ ansible_distribution_release}} main

(file name not shown: download role defaults)

@@ -3,3 +3,11 @@ etcd_download_url: https://github.com/coreos/etcd/releases/download
 flannel_download_url: https://github.com/coreos/flannel/releases/download
 kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
 calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
+etcd_version: v2.2.2
+flannel_version: 0.5.5
+kube_version: v1.1.2
+kube_sha1: 69d110d371752c6492d2f8695aa7a47be5b6ed4e
+calico_version: v0.11.0

(file name not shown: deleted download role version defaults)

@@ -1,8 +0,0 @@
---
etcd_version: v2.2.0
flannel_version: 0.5.3
kube_version: v1.0.6
kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c
calico_version: v0.5.1

(file name not shown: etcd role tasks)

@@ -6,7 +6,7 @@
   file: path=/etc/systemd/system/etcd2.service.d state=directory
 - name: Write etcd2 config file
-  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
+  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf backup=yes
   notify:
     - reload systemd
     - restart etcd2

(file name not shown: etcd role tasks, systemd unit)

@@ -21,4 +21,5 @@
   template:
     src: systemd-etcd2.service.j2
     dest: /lib/systemd/system/etcd2.service
+    backup: yes
   notify: restart daemons

(file name not shown: kubernetes role defaults)

@@ -11,9 +11,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 # look in here. Don't do it.
 kube_config_dir: /etc/kubernetes
-# The port the API Server will be listening on.
-kube_master_port: 443
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/certs"
@@ -33,9 +30,15 @@ kube_cert_group: kube-cert
 dns_domain: "{{ cluster_name }}"
+kube_proxy_mode: iptables
 # IP address of the DNS server.
 # Kubernetes will create a pod with several containers, serving as the DNS
 # server and expose it under this IP address. The IP address must be from
 # the range specified as kube_service_addresses. This magic will actually
 # pick the 10th ip address in the kube_service_addresses range and use that.
 # dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
+# kube_api_runtime_config:
+#   - extensions/v1beta1/daemonsets=true
+#   - extensions/v1beta1/deployments=true

(file name not shown: master role tasks, certificates)

@@ -19,7 +19,7 @@
   args:
     creates: "{{ kube_cert_dir }}/server.crt"
   environment:
-    MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+    MASTER_IP: "{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}"
     MASTER_NAME: "{{ inventory_hostname }}"
     DNS_DOMAIN: "{{ dns_domain }}"
     SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"

(file name not shown: master role tasks)

@@ -45,6 +45,10 @@
   notify:
     - restart daemons
+- debug: msg="{{groups['kube-master'][0]}} == {{inventory_hostname}}"
+  tags:
+    - debug
 - include: gen_tokens.yml
   run_once: true
   when: inventory_hostname == groups['kube-master'][0]

(file diff suppressed because it is too large)

(file name not shown: master role handlers)

@@ -3,30 +3,54 @@
   command: /bin/true
   notify:
     - reload systemd
-    - restart apiserver
-    - restart controller-manager
-    - restart scheduler
-    - restart proxy
+    - restart reloaded-scheduler
+    - restart reloaded-controller-manager
+    - restart reloaded-apiserver
+    - restart reloaded-proxy
 - name: reload systemd
   command: systemctl daemon-reload
 - name: restart apiserver
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-apiserver
+- name: restart reloaded-apiserver
   service:
     name: kube-apiserver
     state: restarted
 - name: restart controller-manager
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-controller-manager
+- name: restart reloaded-controller-manager
   service:
     name: kube-controller-manager
     state: restarted
 - name: restart scheduler
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-scheduler
+- name: restart reloaded-scheduler
   service:
     name: kube-scheduler
     state: restarted
 - name: restart proxy
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-proxy
+- name: restart reloaded-proxy
   service:
     name: kube-proxy
     state: restarted
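The reworked handlers make every notified restart pass through `systemctl daemon-reload` first: each `restart X` handler is now a `/bin/true` step that notifies `reload systemd` and then `restart reloaded-X`. For the apiserver, the chain amounts to roughly this shell sequence:
```
systemctl daemon-reload
systemctl restart kube-apiserver
```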

(file name not shown: master role tasks, config files)

@@ -18,56 +18,57 @@
     proxy_token: "{{ tokens.results[3].content|b64decode }}"
 - name: write the config files for api server
-  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
+  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes
   notify:
-    - restart daemons
+    - restart apiserver
 - name: write config file for controller-manager
-  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
+  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes
   notify:
     - restart controller-manager
 - name: write the kubecfg (auth) file for controller-manager
-  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
+  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig backup=yes
   notify:
     - restart controller-manager
 - name: write the config file for scheduler
-  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
+  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler backup=yes
   notify:
     - restart scheduler
 - name: write the kubecfg (auth) file for scheduler
-  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
+  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig backup=yes
   notify:
     - restart scheduler
 - name: write the kubecfg (auth) file for kubectl
-  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
-- name: write the config files for proxy
-  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
+  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig backup=yes
+- name: Copy kubectl bash completion
+  copy: src=kubectl_bash_completion.sh dest=/etc/bash_completion.d/kubectl.sh
+- name: Create proxy environment vars dir
+  file: path=/etc/systemd/system/kube-proxy.service.d state=directory
+- name: Write proxy config file
+  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
   notify:
-    - restart daemons
+    - restart proxy
 - name: write the kubecfg (auth) file for proxy
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
+  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
 - name: populate users for basic auth in API
   lineinfile:
     dest: "{{ kube_users_dir }}/known_users.csv"
     create: yes
     line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
+    backup: yes
   with_dict: "{{ kube_users }}"
   notify:
     - restart apiserver
-- name: Enable apiserver
-  service:
-    name: kube-apiserver
-    enabled: yes
-    state: started
 - name: Enable controller-manager
   service:
     name: kube-controller-manager
@@ -85,3 +86,9 @@
     name: kube-proxy
     enabled: yes
     state: started
+- name: Enable apiserver
+  service:
+    name: kube-apiserver
+    enabled: yes
+    state: started
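kube-proxy flags now come from a systemd drop-in instead of a file under the kubernetes config dir. A sketch for verifying the result on a master:
```
# the rendered drop-in written by the 'Write proxy config file' task
cat /etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
# pick up the drop-in and restart
systemctl daemon-reload && systemctl restart kube-proxy
```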

(file name not shown: master role install tasks)

@@ -1,19 +1,19 @@
 ---
 - name: Write kube-apiserver systemd init file
-  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
-  notify: restart daemons
+  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes
+  notify: restart apiserver
 - name: Write kube-controller-manager systemd init file
-  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
-  notify: restart daemons
+  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes
+  notify: restart controller-manager
 - name: Write kube-scheduler systemd init file
-  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
-  notify: restart daemons
+  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes
+  notify: restart scheduler
 - name: Write kube-proxy systemd init file
-  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
-  notify: restart daemons
+  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
+  notify: restart proxy
 - name: Install kubernetes binaries
   copy:
(file name not shown: apiserver.j2 template)

@@ -8,7 +8,7 @@
 KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
 # The port on the local server to listen on.
-KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
+KUBE_API_PORT="--insecure-port={{kube_master_insecure_port}} --secure-port={{ kube_master_port }}"
 # KUBELET_PORT="--kubelet_port=10250"
@@ -21,5 +21,8 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node
 # default admission control policies
 KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
+# RUNTIME API CONFIGURATION (e.g. enable extensions)
+KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
 # Add you own!
 KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
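With the two example entries from the kubernetes role defaults uncommented, the Jinja loop above renders `KUBE_RUNTIME_CONFIG` to a repeated flag (the loop also leaves a trailing space inside the quotes):
```
KUBE_RUNTIME_CONFIG="--runtime-config=extensions/v1beta1/daemonsets=true --runtime-config=extensions/v1beta1/deployments=true "
```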

(file name not shown: proxy.j2 template, master role)

@@ -4,4 +4,5 @@
 # default config should be adequate
 # Add your own!
-KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
+[Service]
+Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"

(file name not shown: proxy.kubeconfig.j2 template)

@@ -10,7 +10,7 @@ contexts:
 clusters:
 - cluster:
     certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: http://{{ groups['kube-master'][0] }}:8080
+    server: http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}
   name: {{ cluster_name }}
 users:
 - name: proxy

(file name not shown: kube-apiserver.service.j2 template)

@@ -19,6 +19,7 @@ ExecStart={{ bin_dir }}/kube-apiserver \
     $KUBE_ALLOW_PRIV \
     $KUBE_SERVICE_ADDRESSES \
     $KUBE_ADMISSION_CONTROL \
+    $KUBE_RUNTIME_CONFIG \
     $KUBE_API_ARGS
 Restart=on-failure
 Type=notify

(file name not shown: kube-proxy.service.j2 template, master role)

@@ -1,13 +1,14 @@
 [Unit]
 Description=Kubernetes Kube-Proxy Server
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-{% if overlay_network_plugin|default('') %}
+{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
 After=docker.service calico-node.service
 {% else %}
 After=docker.service
 {% endif %}
 [Service]
+EnvironmentFile=/etc/kubernetes/config
 EnvironmentFile=/etc/network-environment
 ExecStart={{ bin_dir }}/kube-proxy \
     $KUBE_LOGTOSTDERR \

(file name not shown: node role handlers)

@@ -2,18 +2,31 @@
 - name: restart daemons
   command: /bin/true
   notify:
-    - restart kubelet
-    - restart proxy
+    - reload systemd
+    - restart reloaded-kubelet
+    - restart reloaded-proxy
+- name: reload systemd
+  command: systemctl daemon-reload
 - name: restart kubelet
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-kubelet
+- name: restart reloaded-kubelet
   service:
     name: kubelet
     state: restarted
 - name: restart proxy
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-proxy
+- name: restart reloaded-proxy
   service:
     name: kube-proxy
     state: restarted
-- name: reload systemd
-  command: systemctl daemon-reload

(file name not shown: node role tasks, config files)

@@ -18,13 +18,12 @@
   file: path=/etc/systemd/system/kubelet.service.d state=directory
 - name: Write kubelet config file
-  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
+  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes
   notify:
-    - reload systemd
     - restart kubelet
 - name: write the kubecfg (auth) file for kubelet
-  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
+  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig backup=yes
   notify:
     - restart kubelet
@@ -32,13 +31,12 @@
   file: path=/etc/systemd/system/kube-proxy.service.d state=directory
 - name: Write proxy config file
-  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
+  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
   notify:
-    - reload systemd
     - restart proxy
 - name: write the kubecfg (auth) file for kube-proxy
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
+  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
   notify:
     - restart proxy

(file name not shown: node role install tasks)

@@ -1,10 +1,10 @@
 ---
 - name: Write kube-proxy systemd init file
-  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
+  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
   notify: restart daemons
 - name: Write kubelet systemd init file
-  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
+  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
   notify: restart daemons
 - name: Install kubernetes binaries

(file name not shown: kubelet.j2 template)

@@ -16,6 +16,6 @@ Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns
 {% else %}
 Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
 {% endif %}
-{% if overlay_network_plugin|default('') %}
-Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ overlay_network_plugin }}"
+{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
+Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ kube_network_plugin }}"
 {% endif %}

(file name not shown: kubelet.kubeconfig.j2 template)

@@ -5,7 +5,7 @@ preferences: {}
 clusters:
 - cluster:
     certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['kube-master'][0] }}:443
+    server: https://{{ groups['kube-master'][0] }}:{{kube_master_port}}
   name: {{ cluster_name }}
 contexts:
 - context:

(file name not shown: proxy.j2 template, node role)

@@ -3,4 +3,4 @@
 # default config should be adequate
 [Service]
-Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
+Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"

(file name not shown: kube-proxy.service.j2 template, node role)

@@ -1,13 +1,14 @@
 [Unit]
 Description=Kubernetes Kube-Proxy Server
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-{% if overlay_network_plugin|default('') %}
+{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
 After=docker.service calico-node.service
 {% else %}
 After=docker.service
 {% endif %}
 [Service]
+EnvironmentFile=/etc/kubernetes/config
 EnvironmentFile=/etc/network-environment
 ExecStart={{ bin_dir }}/kube-proxy \
     $KUBE_LOGTOSTDERR \

(file name not shown: kubelet.service.j2 template)

@@ -1,14 +1,14 @@
 [Unit]
 Description=Kubernetes Kubelet Server
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-{% if overlay_network_plugin|default('') %}
+{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
 After=docker.service calico-node.service
 {% else %}
 After=docker.service
 {% endif %}
 [Service]
-#WorkingDirectory=/var/lib/kubelet
+EnvironmentFile=/etc/kubernetes/config
 EnvironmentFile=/etc/network-environment
 ExecStart={{ bin_dir }}/kubelet \
     $KUBE_LOGTOSTDERR \

(file name not shown: network_plugin role, calico tasks)

@@ -11,36 +11,36 @@
 - name: Calico | Write calico-node systemd init file
   template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  register: newservice
   notify:
     - reload systemd
     - restart calico-node
-- name: Calico | Write network-environment
-  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
-  notify:
-    - reload systemd
-    - restart calico-node
+- name: Calico | daemon-reload
+  command: systemctl daemon-reload
+  when: newservice|changed
+  changed_when: False
 - name: Calico | Enable calico-node
   service: name=calico-node enabled=yes state=started
+- name: Calico | Configure calico-node desired pool
+  shell: calicoctl pool add {{ kube_pods_subnet }}
+  environment:
+    ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
+  run_once: true
 - name: Calico | Configure calico-node remove default pool
   shell: calicoctl pool remove 192.168.0.0/16
   environment:
     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
   run_once: true
-- name: Calico | Configure calico-node desired pool
-  shell: calicoctl pool add {{ overlay_network_subnet }}
-  environment:
-    ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
-  run_once: true
 - name: Calico | Disable node mesh
   shell: calicoctl bgp node-mesh off
-  when: peer_with_router and inventory_hostname in groups['kube-node']
+  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
 - name: Calico | Configure peering with router(s)
   shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
   with_items: peers
-  when: peer_with_router and inventory_hostname in groups['kube-node']
+  when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
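The reordered pool tasks (add the desired pool before removing calico's 192.168.0.0/16 default) correspond to these calicoctl calls; a sketch using the default kube_pods_subnet and the README's first kube-master address as a placeholder:
```
export ETCD_AUTHORITY=10.99.0.26:4001   # first kube-master, placeholder address
calicoctl pool add 10.233.64.0/18       # kube_pods_subnet default
calicoctl pool remove 192.168.0.0/16
```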

(file name not shown: network_plugin role tasks, main.yml, new file)

@@ -0,0 +1,17 @@
---
- name: "Test if network plugin is defined"
  fail: msg="ERROR, One network_plugin variable must be defined (Flannel or Calico)"
  when: ( kube_network_plugin is defined and kube_network_plugin == "calico" and kube_network_plugin == "flannel" ) or
        kube_network_plugin is not defined
- include: flannel.yml
  when: kube_network_plugin == "flannel"
- name: Calico | Write network-environment
  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
  when: kube_network_plugin == "calico"
- include: calico.yml
  when: kube_network_plugin == "calico"
- meta: flush_handlers

(file name not shown: calico/calico-node.service.j2 template, new file)

@@ -0,0 +1,20 @@
[Unit]
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
Requires=docker.service
After=docker.service etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
{% if inventory_hostname in groups['kube-node'] and peer_with_router|default(false)%}
ExecStart={{ bin_dir }}/calicoctl node --kubernetes --ip=${DEFAULT_IPV4} --as={{ local_as }} --detach=false
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --kubernetes --ip=${DEFAULT_IPV4} --detach=false
{% endif %}
Restart=always
Restart=10
[Install]
WantedBy=multi-user.target
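Since the unit runs calicoctl with `--detach=false`, the agent stays in the foreground under systemd. A quick health check, echoing the README's troubleshooting steps:
```
systemctl status calico-node
docker ps | grep calico-node   # the README's check that the calico-node container is running
```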

(file name not shown: calico/network-environment.j2 template)

@@ -1,7 +1,7 @@
 #! /usr/bin/bash
 # This node's IPv4 address
 CALICO_IPAM=true
-DEFAULT_IPV4={{ ansible_default_ipv4.address }}
+DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}
 {% if inventory_hostname in groups['kube-node'] %}
 # The kubernetes master IP
@@ -12,7 +12,7 @@ KUBERNETES_MASTER={{ groups['kube-master'][0] }}
 ETCD_AUTHORITY={{ groups['kube-master'][0] }}:4001
 # The kubernetes-apiserver location - used by the calico plugin
-KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:8080/api/v1/
+KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}/api/v1/
 # Location of the calicoctl binary - used by the calico plugin
 CALICOCTL_PATH="{{ bin_dir }}/calicoctl"

(file name not shown: flannel network config template, new file)

@@ -0,0 +1 @@
{ "Network": "{{ kube_service_addresses }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "vxlan" } }

(file name not shown: deleted overlay_network role tasks)

@@ -1,13 +0,0 @@
---
- name: "Test if overlay network is defined"
fail: msg="ERROR, One overlay_network variable must be defined (Flannel or Calico)"
when: ( overlay_network_plugin is defined and overlay_network_plugin == "calico" and overlay_network_plugin == "flannel" ) or
overlay_network_plugin is not defined
- include: flannel.yml
when: overlay_network_plugin == "flannel"
- include: calico.yml
when: overlay_network_plugin == "calico"
- meta: flush_handlers

(file name not shown: deleted calicoctl node systemd unit template)

@@ -1,23 +0,0 @@
[Unit]
Description=calicoctl node
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
{% if inventory_hostname in groups['kube-node'] %}
{% if peer_with_router %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --as={{ local_as }} --kubernetes
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
{% endif %}
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
{% endif %}
RemainAfterExit=yes
Type=oneshot
[Install]
WantedBy=multi-user.target

(file name not shown: deleted flannel network config template)

@@ -1 +0,0 @@
{ "Network": "{{ kube_service_addresses }}", "SubnetLen": {{ overlay_network_host_prefix }}, "Backend": { "Type": "vxlan" } }