mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 13:54:37 +03:00
Compare commits
190 Commits
v1.0
...
1.3.0_k1.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
af8f394714 | ||
|
|
eab2cec0ad | ||
|
|
0b17a4c00f | ||
|
|
f49aa90bf7 | ||
|
|
6f9148e994 | ||
|
|
7c8e9dbe00 | ||
|
|
df3d0bcc21 | ||
|
|
7913d62749 | ||
|
|
d5320961e9 | ||
|
|
9c461e1018 | ||
|
|
9a03249446 | ||
|
|
4e015dd3f1 | ||
|
|
6f53269ce3 | ||
|
|
e356b2de4f | ||
|
|
8fa0110e28 | ||
|
|
2a08f7bc0a | ||
|
|
99d16913d3 | ||
|
|
d172457504 | ||
|
|
6103d673b7 | ||
|
|
29bf90a858 | ||
|
|
2c35e4c055 | ||
|
|
e3cdb3574a | ||
|
|
15cd1bfc56 | ||
|
|
392570f4ff | ||
|
|
be5fe9af54 | ||
|
|
7006d56ab8 | ||
|
|
1695682d85 | ||
|
|
1d1d8b9c28 | ||
|
|
98fe2c02b2 | ||
|
|
92c2a9457e | ||
|
|
a11e0cb3d1 | ||
|
|
dbb6f4934e | ||
|
|
9f07f2a951 | ||
|
|
005ddedb94 | ||
|
|
b72e220126 | ||
|
|
e0f460d9b5 | ||
|
|
2bd6b83656 | ||
|
|
2df70d6a3d | ||
|
|
ddaeb2b8fa | ||
|
|
6f4f170a88 | ||
|
|
3f3b03bc99 | ||
|
|
c9d9ccf025 | ||
|
|
e378f4fb14 | ||
|
|
5c15d14f12 | ||
|
|
b45747ec86 | ||
|
|
d597f707f1 | ||
|
|
4388cab8d6 | ||
|
|
595e93e6da | ||
|
|
5f4e01cec5 | ||
|
|
7c9c609ac4 | ||
|
|
680864f95c | ||
|
|
7315d33e3c | ||
|
|
b2afbfd4fb | ||
|
|
ab694ee291 | ||
|
|
bba3525cd8 | ||
|
|
2c816f66a3 | ||
|
|
d585ceaf3b | ||
|
|
fec1dc9041 | ||
|
|
e7e03bae9f | ||
|
|
b81a064242 | ||
|
|
03d402e226 | ||
|
|
0a238d9853 | ||
|
|
4fe0ced5db | ||
|
|
c6d65cb535 | ||
|
|
a0746a3efd | ||
|
|
46807c655d | ||
|
|
970aab70e1 | ||
|
|
4561dd327b | ||
|
|
94c0c32752 | ||
|
|
b155e8cc7b | ||
|
|
9046b7b1bf | ||
|
|
3c450191ea | ||
|
|
184bb8c94d | ||
|
|
a003d91576 | ||
|
|
9914229484 | ||
|
|
b3841659d7 | ||
|
|
3a349b8519 | ||
|
|
6e91b6f47c | ||
|
|
bf5c531037 | ||
|
|
44ac355aa7 | ||
|
|
958c770bef | ||
|
|
6012230110 | ||
|
|
61bb6468ef | ||
|
|
f2069b296c | ||
|
|
9649f2779d | ||
|
|
c91a3183d3 | ||
|
|
693230ace9 | ||
|
|
f21f660cc5 | ||
|
|
43afd42f59 | ||
|
|
4d1828c724 | ||
|
|
953f482585 | ||
|
|
4055980ce6 | ||
|
|
e2984b4fdb | ||
|
|
394a64f904 | ||
|
|
2fc8b46996 | ||
|
|
5efc09710b | ||
|
|
f908309739 | ||
|
|
9862afb097 | ||
|
|
59994a6df1 | ||
|
|
0a1b92f348 | ||
|
|
af9b945874 | ||
|
|
3cbcd6f189 | ||
|
|
1568cbe8e9 | ||
|
|
eb4dd5f19d | ||
|
|
fd0e5e756e | ||
|
|
f49620517e | ||
|
|
ef8a46b8c5 | ||
|
|
47c211f9c1 | ||
|
|
b23b8aa3de | ||
|
|
3981b73924 | ||
|
|
e0ec3e7241 | ||
|
|
b66cc67b6f | ||
|
|
83c1105192 | ||
|
|
d9a8de487f | ||
|
|
d1e19563b0 | ||
|
|
3014dfef24 | ||
|
|
b92fa01e05 | ||
|
|
e3ebc8e009 | ||
|
|
625efc85af | ||
|
|
d30474d305 | ||
|
|
9cecc30b6d | ||
|
|
563be70728 | ||
|
|
a03f3739dc | ||
|
|
bfe78848fa | ||
|
|
126d4e36c8 | ||
|
|
97c4edc028 | ||
|
|
f74c195d47 | ||
|
|
2374878ef7 | ||
|
|
b9e56dd435 | ||
|
|
ede5f9592a | ||
|
|
a6137b3aee | ||
|
|
da3920496d | ||
|
|
895a02e274 | ||
|
|
b4b20c9dbc | ||
|
|
fe8eff07d3 | ||
|
|
941cae2a4c | ||
|
|
4a9a82ca86 | ||
|
|
d2ac5ac54b | ||
|
|
4c2f757fe8 | ||
|
|
e701c3d49d | ||
|
|
5762d8f301 | ||
|
|
9a278bae00 | ||
|
|
d3f35e12a2 | ||
|
|
d7b7db34fa | ||
|
|
4dd85b5078 | ||
|
|
7f73bb5522 | ||
|
|
795ce8468d | ||
|
|
fb6dd60f52 | ||
|
|
e427591545 | ||
|
|
9b8c89ebb0 | ||
|
|
323155b0e1 | ||
|
|
f368faf66b | ||
|
|
8fa7811b63 | ||
|
|
c352df6fc8 | ||
|
|
34419d6bae | ||
|
|
d94bc8e599 | ||
|
|
57e1831f78 | ||
|
|
1a0208f448 | ||
|
|
5319f23e73 | ||
|
|
b45261b763 | ||
|
|
10ade2cbdc | ||
|
|
471dad44b6 | ||
|
|
3f411bffe4 | ||
|
|
5cc29b77aa | ||
|
|
70aa68b9c7 | ||
|
|
7efaf30d36 | ||
|
|
0b164bec02 | ||
|
|
3f8f0f550b | ||
|
|
d6a790ec46 | ||
|
|
8eef0db3ec | ||
|
|
2b3543d0ee | ||
|
|
c997860e1c | ||
|
|
27b0980622 | ||
|
|
3fb9101e40 | ||
|
|
3bf74530ce | ||
|
|
f6e4cc530c | ||
|
|
e85fb0460e | ||
|
|
f0eb963f5e | ||
|
|
f216302f95 | ||
|
|
b98227e9a4 | ||
|
|
f27a3f047f | ||
|
|
8e585cfdfe | ||
|
|
0af0a3517f | ||
|
|
73e240c644 | ||
|
|
533fe3b8e6 | ||
|
|
95403e9d93 | ||
|
|
250ed9d56b | ||
|
|
6381e75769 | ||
|
|
71e4b185c5 | ||
|
|
a3c5be2c9d |
37
.gitmodules
vendored
37
.gitmodules
vendored
@@ -1,30 +1,49 @@
|
|||||||
[submodule "roles/apps/k8s-kube-ui"]
|
[submodule "roles/apps/k8s-kube-ui"]
|
||||||
path = roles/apps/k8s-kube-ui
|
path = roles/apps/k8s-kube-ui
|
||||||
url = https://github.com/ansibl8s/k8s-kube-ui.git
|
url = https://github.com/ansibl8s/k8s-kube-ui.git
|
||||||
[submodule "roles/apps/k8s-skydns"]
|
branch = v1.0
|
||||||
path = roles/apps/k8s-skydns
|
[submodule "roles/apps/k8s-kubedns"]
|
||||||
url = https://github.com/ansibl8s/k8s-skydns.git
|
path = roles/apps/k8s-kubedns
|
||||||
|
url = https://github.com/ansibl8s/k8s-kubedns.git
|
||||||
|
branch = v1.0
|
||||||
[submodule "roles/apps/k8s-common"]
|
[submodule "roles/apps/k8s-common"]
|
||||||
path = roles/apps/k8s-common
|
path = roles/apps/k8s-common
|
||||||
url = https://github.com/ansibl8s/k8s-common.git
|
url = https://github.com/ansibl8s/k8s-common.git
|
||||||
|
branch = v1.0
|
||||||
[submodule "roles/apps/k8s-redis"]
|
[submodule "roles/apps/k8s-redis"]
|
||||||
path = roles/apps/k8s-redis
|
path = roles/apps/k8s-redis
|
||||||
url = https://github.com/ansibl8s/k8s-redis.git
|
url = https://github.com/ansibl8s/k8s-redis.git
|
||||||
|
branch = v1.0
|
||||||
[submodule "roles/apps/k8s-elasticsearch"]
|
[submodule "roles/apps/k8s-elasticsearch"]
|
||||||
path = roles/apps/k8s-elasticsearch
|
path = roles/apps/k8s-elasticsearch
|
||||||
url = https://github.com/ansibl8s/k8s-elasticsearch.git
|
url = https://github.com/ansibl8s/k8s-elasticsearch.git
|
||||||
[submodule "roles/apps/k8s-fabric8"]
|
[submodule "roles/apps/k8s-fabric8"]
|
||||||
path = roles/apps/k8s-fabric8
|
path = roles/apps/k8s-fabric8
|
||||||
url = https://github.com/ansibl8s/k8s-fabric8.git
|
url = https://github.com/ansibl8s/k8s-fabric8.git
|
||||||
|
branch = v1.0
|
||||||
[submodule "roles/apps/k8s-memcached"]
|
[submodule "roles/apps/k8s-memcached"]
|
||||||
path = roles/apps/k8s-memcached
|
path = roles/apps/k8s-memcached
|
||||||
url = https://github.com/ansibl8s/k8s-memcached.git
|
url = https://github.com/ansibl8s/k8s-memcached.git
|
||||||
[submodule "roles/apps/k8s-haproxy"]
|
branch = v1.0
|
||||||
path = roles/apps/k8s-haproxy
|
|
||||||
url = https://github.com/ansibl8s/k8s-haproxy.git
|
|
||||||
[submodule "roles/apps/k8s-postgres"]
|
[submodule "roles/apps/k8s-postgres"]
|
||||||
path = roles/apps/k8s-postgres
|
path = roles/apps/k8s-postgres
|
||||||
url = https://github.com/ansibl8s/k8s-postgres.git
|
url = https://github.com/ansibl8s/k8s-postgres.git
|
||||||
[submodule "roles/apps/k8s-kubedns"]
|
branch = v1.0
|
||||||
path = roles/apps/k8s-kubedns
|
[submodule "roles/apps/k8s-kubedash"]
|
||||||
url = https://github.com/ansibl8s/k8s-kubedns.git
|
path = roles/apps/k8s-kubedash
|
||||||
|
url = https://github.com/ansibl8s/k8s-kubedash.git
|
||||||
|
[submodule "roles/apps/k8s-heapster"]
|
||||||
|
path = roles/apps/k8s-heapster
|
||||||
|
url = https://github.com/ansibl8s/k8s-heapster.git
|
||||||
|
[submodule "roles/apps/k8s-influxdb"]
|
||||||
|
path = roles/apps/k8s-influxdb
|
||||||
|
url = https://github.com/ansibl8s/k8s-influxdb.git
|
||||||
|
[submodule "roles/apps/k8s-kube-logstash"]
|
||||||
|
path = roles/apps/k8s-kube-logstash
|
||||||
|
url = https://github.com/ansibl8s/k8s-kube-logstash.git
|
||||||
|
[submodule "roles/apps/k8s-etcd"]
|
||||||
|
path = roles/apps/k8s-etcd
|
||||||
|
url = https://github.com/ansibl8s/k8s-etcd.git
|
||||||
|
[submodule "roles/apps/k8s-rabbitmq"]
|
||||||
|
path = roles/apps/k8s-rabbitmq
|
||||||
|
url = https://github.com/ansibl8s/k8s-rabbitmq.git
|
||||||
|
|||||||
41
.travis.yml
Normal file
41
.travis.yml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
sudo: required
|
||||||
|
dist: trusty
|
||||||
|
language: python
|
||||||
|
python: "2.7"
|
||||||
|
|
||||||
|
addons:
|
||||||
|
hosts:
|
||||||
|
- node1
|
||||||
|
|
||||||
|
env:
|
||||||
|
- SITE=cluster.yml
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- sudo apt-get update -qq
|
||||||
|
|
||||||
|
install:
|
||||||
|
# Install Ansible.
|
||||||
|
- sudo -H pip install ansible
|
||||||
|
- sudo -H pip install netaddr
|
||||||
|
|
||||||
|
cache:
|
||||||
|
directories:
|
||||||
|
- $HOME/releases
|
||||||
|
- $HOME/.cache/pip
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
- export PATH=$PATH:/usr/local/bin
|
||||||
|
|
||||||
|
script:
|
||||||
|
# Check the role/playbook's syntax.
|
||||||
|
- "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --syntax-check"
|
||||||
|
|
||||||
|
# Run the role/playbook with ansible-playbook.
|
||||||
|
- "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local"
|
||||||
|
|
||||||
|
# Run the role/playbook again, checking to make sure it's idempotent.
|
||||||
|
- >
|
||||||
|
sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local
|
||||||
|
| tee /dev/stderr | grep -q 'changed=0.*failed=0'
|
||||||
|
&& (echo 'Idempotence test: pass' && exit 0)
|
||||||
|
|| (echo 'Idempotence test: fail' && exit 1)
|
||||||
229
README.md
229
README.md
@@ -1,36 +1,104 @@
|
|||||||
|
[](https://travis-ci.org/ansibl8s/setup-kubernetes)
|
||||||
kubernetes-ansible
|
kubernetes-ansible
|
||||||
========
|
========
|
||||||
|
|
||||||
Install and configure a kubernetes cluster including network overlay and optionnal addons.
|
Install and configure a Multi-Master/HA kubernetes cluster including network plugin.
|
||||||
Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.
|
|
||||||
|
|
||||||
### Requirements
|
### Requirements
|
||||||
Tested on **Debian Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
|
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
|
||||||
The target servers must have access to the Internet in order to pull docker imaqes.
|
Should work on **RedHat/Fedora/Centos** platforms (to be tested)
|
||||||
The firewalls are not managed, you'll need to implement your own rules the way you used to.
|
* The target servers must have access to the Internet in order to pull docker imaqes.
|
||||||
|
* The firewalls are not managed, you'll need to implement your own rules the way you used to.
|
||||||
Ansible v1.9.x
|
* Ansible v1.9.x and python-netaddr
|
||||||
|
|
||||||
### Components
|
### Components
|
||||||
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.0.6
|
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3
|
||||||
* [etcd](https://github.com/coreos/etcd/releases) v2.2.0
|
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
|
||||||
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.5.1
|
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.13.0
|
||||||
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.3
|
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
|
||||||
* [docker](https://www.docker.com/) v1.8.2
|
* [docker](https://www.docker.com/) v1.9.1
|
||||||
|
|
||||||
|
Quickstart
|
||||||
|
-------------------------
|
||||||
|
The following steps will quickly setup a kubernetes cluster with default configuration.
|
||||||
|
These defaults are good for tests purposes.
|
||||||
|
|
||||||
|
Edit the inventory according to the number of servers
|
||||||
|
```
|
||||||
|
[downloader]
|
||||||
|
localhost ansible_connection=local ansible_python_interpreter=python2
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
10.115.99.31
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
10.115.99.31
|
||||||
|
10.115.99.32
|
||||||
|
10.115.99.33
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
10.115.99.32
|
||||||
|
10.115.99.33
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
|
```
|
||||||
|
|
||||||
|
Run the playbook
|
||||||
|
```
|
||||||
|
ansible-playbook -i inventory/inventory.cfg cluster.yml -u root
|
||||||
|
```
|
||||||
|
|
||||||
|
You can jump directly to "*Available apps, installation procedure*"
|
||||||
|
|
||||||
|
|
||||||
Ansible
|
Ansible
|
||||||
-------------------------
|
-------------------------
|
||||||
### Download binaries
|
|
||||||
A role allows to download required binaries which will be stored in a directory defined by the variable
|
|
||||||
**'local_release_dir'** (by default /tmp).
|
|
||||||
Please ensure that you have enough disk space there (about **1G**).
|
|
||||||
|
|
||||||
**Note**: Whenever you'll need to change the version of a software, you'll have to erase the content of this directory.
|
|
||||||
|
|
||||||
|
|
||||||
### Variables
|
### Variables
|
||||||
The main variables to change are located in the directory ```environments/[env_name]/group_vars/k8s-cluster.yml```.
|
The main variables to change are located in the directory ```inventory/group_vars/all.yml```.
|
||||||
|
|
||||||
|
### Inventory
|
||||||
|
Below is an example of an inventory.
|
||||||
|
Note : The bgp vars local_as and peers are not mandatory if the var **'peer_with_router'** is set to false
|
||||||
|
By default this variable is set to false and therefore all the nodes are configure in **'node-mesh'** mode.
|
||||||
|
In node-mesh mode the nodes peers with all the nodes in order to exchange routes.
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
[downloader]
|
||||||
|
localhost ansible_connection=local ansible_python_interpreter=python2
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node3 ansible_ssh_host=10.99.0.4
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node3 ansible_ssh_host=10.99.0.4
|
||||||
|
node4 ansible_ssh_host=10.99.0.5
|
||||||
|
node5 ansible_ssh_host=10.99.0.36
|
||||||
|
node6 ansible_ssh_host=10.99.0.37
|
||||||
|
|
||||||
|
[paris]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
|
||||||
|
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
|
||||||
|
|
||||||
|
[new-york]
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
|
||||||
|
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
|
```
|
||||||
|
|
||||||
### Playbook
|
### Playbook
|
||||||
```
|
```
|
||||||
@@ -42,66 +110,72 @@ The main variables to change are located in the directory ```environments/[env_n
|
|||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
roles:
|
roles:
|
||||||
- { role: etcd, tags: etcd }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker }
|
- { role: docker, tags: docker }
|
||||||
- { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
|
- { role: kubernetes/node, tags: node }
|
||||||
|
- { role: etcd, tags: etcd }
|
||||||
- { role: dnsmasq, tags: dnsmasq }
|
- { role: dnsmasq, tags: dnsmasq }
|
||||||
|
- { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes/master, tags: master }
|
||||||
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
|
|
||||||
- { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
|
|
||||||
|
|
||||||
- hosts: kube-node
|
|
||||||
roles:
|
|
||||||
- { role: kubernetes/node, tags: node }
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
It is possible to define variables for different environments.
|
It is possible to define variables for different environments.
|
||||||
For instance, in order to deploy the cluster on 'dev' environment run the following command.
|
For instance, in order to deploy the cluster on 'dev' environment run the following command.
|
||||||
```
|
```
|
||||||
ansible-playbook -i environments/dev/inventory cluster.yml -u root
|
ansible-playbook -i inventory/dev/inventory.cfg cluster.yml -u root
|
||||||
```
|
```
|
||||||
|
|
||||||
Kubernetes
|
Kubernetes
|
||||||
-------------------------
|
-------------------------
|
||||||
|
### Multi master notes
|
||||||
|
* You can choose where to install the master components. If you want your master node to act both as master (api,scheduler,controller) and node (e.g. accept workloads, create pods ...),
|
||||||
|
the server address has to be present on both groups 'kube-master' and 'kube-node'.
|
||||||
|
|
||||||
|
* Almost all kubernetes components are running into pods except *kubelet*. These pods are managed by kubelet which ensure they're always running
|
||||||
|
|
||||||
|
* For safety reasons, you should have at least two master nodes and 3 etcd servers
|
||||||
|
|
||||||
|
* Kube-proxy doesn't support multiple apiservers on startup ([Issue 18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
|
||||||
|
In order to do so, some variables have to be used '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**'
|
||||||
|
|
||||||
|
|
||||||
### Network Overlay
|
### Network Overlay
|
||||||
You can choose between 2 network overlays. Only one must be chosen.
|
You can choose between 2 network plugins. Only one must be chosen.
|
||||||
|
|
||||||
* **flannel**: gre/vxlan (layer 2) networking. ([official docs]('https://github.com/coreos/flannel'))
|
* **flannel**: gre/vxlan (layer 2) networking. ([official docs](https://github.com/coreos/flannel))
|
||||||
|
|
||||||
* **calico**: bgp (layer 3) networking. ([official docs]('http://docs.projectcalico.org/en/0.13/'))
|
* **calico**: bgp (layer 3) networking. ([official docs](http://docs.projectcalico.org/en/0.13/))
|
||||||
|
|
||||||
The choice is defined with the variable '**overlay_network_plugin**'
|
The choice is defined with the variable '**kube_network_plugin**'
|
||||||
|
|
||||||
### Expose a service
|
### Expose a service
|
||||||
There are several loadbalancing solutions.
|
There are several loadbalancing solutions.
|
||||||
The ones i found suitable for kubernetes are [Vulcand]('http://vulcand.io/') and [Haproxy]('http://www.haproxy.org/')
|
The one i found suitable for kubernetes are [Vulcand](http://vulcand.io/) and [Haproxy](http://www.haproxy.org/)
|
||||||
|
|
||||||
My cluster is working with haproxy and kubernetes services are configured with the loadbalancing type '**nodePort**'.
|
My cluster is working with haproxy and kubernetes services are configured with the loadbalancing type '**nodePort**'.
|
||||||
eg: each node opens the same tcp port and forwards the traffic to the target pod wherever it is located.
|
eg: each node opens the same tcp port and forwards the traffic to the target pod wherever it is located.
|
||||||
|
|
||||||
Then Haproxy can be configured to request kubernetes's api in order to loadbalance on the proper tcp port on the nodes.
|
Then Haproxy can be configured to request kubernetes's api in order to loadbalance on the proper tcp port on the nodes.
|
||||||
|
|
||||||
Please refer to the proper kubernetes documentation on [Services]('https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md')
|
Please refer to the proper kubernetes documentation on [Services](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md)
|
||||||
|
|
||||||
### Check cluster status
|
### Check cluster status
|
||||||
|
|
||||||
#### Kubernetes components
|
#### Kubernetes components
|
||||||
Master processes : kube-apiserver, kube-scheduler, kube-controller, kube-proxy
|
|
||||||
Nodes processes : kubelet, kube-proxy, [calico-node|flanneld]
|
|
||||||
|
|
||||||
* Check the status of the processes
|
* Check the status of the processes
|
||||||
```
|
```
|
||||||
systemctl status [process_name]
|
systemctl status kubelet
|
||||||
```
|
```
|
||||||
|
|
||||||
* Check the logs
|
* Check the logs
|
||||||
```
|
```
|
||||||
journalctl -ae -u [process_name]
|
journalctl -ae -u kubelet
|
||||||
```
|
```
|
||||||
|
|
||||||
* Check the NAT rules
|
* Check the NAT rules
|
||||||
@@ -109,15 +183,26 @@ journalctl -ae -u [process_name]
|
|||||||
iptables -nLv -t nat
|
iptables -nLv -t nat
|
||||||
```
|
```
|
||||||
|
|
||||||
|
For the master nodes you'll have to see the docker logs for the apiserver
|
||||||
|
```
|
||||||
|
docker logs [apiserver docker id]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Available apps, installation procedure
|
||||||
|
|
||||||
|
There are two ways of installing new apps
|
||||||
|
|
||||||
|
#### Ansible galaxy
|
||||||
|
|
||||||
#### Available apps, installation procedure
|
|
||||||
Additionnal apps can be installed with ```ansible-galaxy```.
|
Additionnal apps can be installed with ```ansible-galaxy```.
|
||||||
|
|
||||||
you'll need to edit the file '*requirements.yml*' in order to chose needed apps.
|
ou'll need to edit the file '*requirements.yml*' in order to chose needed apps.
|
||||||
The list of available apps are available [there](https://github.com/ansibl8s)
|
The list of available apps are available [there](https://github.com/ansibl8s)
|
||||||
|
|
||||||
For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
|
For instance it is **strongly recommanded** to install a dns server which resolves kubernetes service names.
|
||||||
In order to use this role you'll need the following entries in the file '*requirements.yml*'
|
In order to use this role you'll need the following entries in the file '*requirements.yml*'
|
||||||
|
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.
|
||||||
```
|
```
|
||||||
- src: https://github.com/ansibl8s/k8s-common.git
|
- src: https://github.com/ansibl8s/k8s-common.git
|
||||||
path: roles/apps
|
path: roles/apps
|
||||||
@@ -139,16 +224,34 @@ Then download the roles with ansible-galaxy
|
|||||||
ansible-galaxy install -r requirements.yml
|
ansible-galaxy install -r requirements.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Finally update your playbook with the chosen role, and run it
|
#### Git submodules
|
||||||
|
Alternatively the roles can be installed as git submodules.
|
||||||
|
That way is easier if you want to do some changes and commit them.
|
||||||
|
|
||||||
|
You can list available submodules with the following command:
|
||||||
|
```
|
||||||
|
grep path .gitmodules | sed 's/.*= //'
|
||||||
|
```
|
||||||
|
|
||||||
|
In order to install the dns addon you'll need to follow these steps
|
||||||
|
```
|
||||||
|
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
|
||||||
|
git submodule update
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally update the playbook ```apps.yml``` with the chosen roles, and run it
|
||||||
```
|
```
|
||||||
...
|
...
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/master, tags: master }
|
|
||||||
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
|
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additionnal info.
|
|
||||||
|
```
|
||||||
|
ansible-playbook -i inventory/inventory.cfg apps.yml -u root
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
#### Calico networking
|
#### Calico networking
|
||||||
Check if the calico-node container is running
|
Check if the calico-node container is running
|
||||||
@@ -173,38 +276,4 @@ calicoctl endpoint show --detail
|
|||||||
```
|
```
|
||||||
#### Flannel networking
|
#### Flannel networking
|
||||||
|
|
||||||
Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.0/basicstutorials.html)
|
Congrats ! now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)
|
||||||
|
|
||||||
Known issues
|
|
||||||
-------------
|
|
||||||
### Node reboot and Calico
|
|
||||||
There is a major issue with calico-kubernetes version 0.5.1 and kubernetes prior to 1.1 :
|
|
||||||
After host reboot, the pods networking are not configured again, they are started without any network configuration.
|
|
||||||
This issue will be fixed when kubernetes 1.1 will be released as described in this [issue](https://github.com/projectcalico/calico-kubernetes/issues/34)
|
|
||||||
|
|
||||||
### Monitoring addon
|
|
||||||
Until now i didn't managed to get the monitoring addon working.
|
|
||||||
|
|
||||||
### Apiserver listen on secure port only
|
|
||||||
Currently the api-server listens on both secure and insecure ports.
|
|
||||||
The insecure port is mainly used for calico.
|
|
||||||
Will be fixed soon.
|
|
||||||
|
|
||||||
How to contribute
|
|
||||||
------------------
|
|
||||||
|
|
||||||
### Update available roles
|
|
||||||
Alternatively the roles can be installed as git submodules.
|
|
||||||
That way is easier if you want to do some changes and commit them.
|
|
||||||
|
|
||||||
You can list available submodules with the following command:
|
|
||||||
```
|
|
||||||
grep path .gitmodules | sed 's/.*= //'
|
|
||||||
```
|
|
||||||
|
|
||||||
For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**.
|
|
||||||
In order to use this role you'll need to follow these steps
|
|
||||||
```
|
|
||||||
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
|
|
||||||
git submodule update
|
|
||||||
```
|
|
||||||
|
|||||||
29
apps.yml
Normal file
29
apps.yml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
- hosts: kube-master
|
||||||
|
roles:
|
||||||
|
# System
|
||||||
|
- { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] }
|
||||||
|
|
||||||
|
# Databases
|
||||||
|
- { role: apps/k8s-postgres, tags: 'postgres' }
|
||||||
|
- { role: apps/k8s-elasticsearch, tags: 'elasticsearch' }
|
||||||
|
- { role: apps/k8s-memcached, tags: 'memcached' }
|
||||||
|
- { role: apps/k8s-redis, tags: 'redis' }
|
||||||
|
|
||||||
|
# Msg Broker
|
||||||
|
- { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }
|
||||||
|
|
||||||
|
# Monitoring
|
||||||
|
- { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']}
|
||||||
|
- { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']}
|
||||||
|
- { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']}
|
||||||
|
|
||||||
|
# logging
|
||||||
|
- { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}
|
||||||
|
|
||||||
|
# Console
|
||||||
|
- { role: apps/k8s-fabric8, tags: 'fabric8' }
|
||||||
|
- { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']}
|
||||||
|
|
||||||
|
# ETCD
|
||||||
|
- { role: apps/k8s-etcd, tags: 'etcd'}
|
||||||
13
cluster.yml
13
cluster.yml
@@ -6,18 +6,13 @@
|
|||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
roles:
|
roles:
|
||||||
- { role: etcd, tags: etcd }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker }
|
- { role: docker, tags: docker }
|
||||||
- { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
|
- { role: kubernetes/node, tags: node }
|
||||||
|
- { role: etcd, tags: etcd }
|
||||||
- { role: dnsmasq, tags: dnsmasq }
|
- { role: dnsmasq, tags: dnsmasq }
|
||||||
|
- { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes/master, tags: master }
|
||||||
# Apps to be installed
|
|
||||||
# - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
|
|
||||||
# - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
|
|
||||||
|
|
||||||
- hosts: kube-node
|
|
||||||
roles:
|
|
||||||
- { role: kubernetes/node, tags: node }
|
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
# Directory where the binaries will be installed
|
|
||||||
bin_dir: /usr/local/bin
|
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
|
||||||
local_release_dir: "/tmp/releases"
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
[downloader]
|
|
||||||
172.16.0.1
|
|
||||||
|
|
||||||
[kube-master]
|
|
||||||
# NB : the br_addr must be in the {{ calico_pool }} subnet
|
|
||||||
# it will assign a /24 subnet per node
|
|
||||||
172.16.0.1 br_addr=10.233.64.1
|
|
||||||
|
|
||||||
[etcd]
|
|
||||||
172.16.0.1
|
|
||||||
|
|
||||||
[kube-node:children]
|
|
||||||
usa
|
|
||||||
france
|
|
||||||
|
|
||||||
[usa]
|
|
||||||
172.16.0.1 br_addr=10.233.64.1
|
|
||||||
# Configure the as assigned to the each node if bgp peering with border routers is enabled
|
|
||||||
172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
|
|
||||||
172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
|
|
||||||
|
|
||||||
[france]
|
|
||||||
192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
|
|
||||||
192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
|
|
||||||
|
|
||||||
[k8s-cluster:children]
|
|
||||||
kube-node
|
|
||||||
kube-master
|
|
||||||
|
|
||||||
# If you want to configure bgp peering with border router you'll need to set the following vars
|
|
||||||
# List of routers and their as number
|
|
||||||
#[usa:vars]
|
|
||||||
#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
|
|
||||||
#
|
|
||||||
#[france:vars]
|
|
||||||
#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
# Directory where the binaries will be installed
|
|
||||||
bin_dir: /usr/local/bin
|
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
|
||||||
local_release_dir: "/tmp/releases"
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
# Users to create for basic auth in Kubernetes API via HTTP
|
|
||||||
# kube_users:
|
|
||||||
# kube:
|
|
||||||
# pass: changeme
|
|
||||||
# role: admin
|
|
||||||
# root:
|
|
||||||
# pass: changeme
|
|
||||||
# role: admin
|
|
||||||
|
|
||||||
# Kubernetes cluster name, also will be used as DNS domain
|
|
||||||
# cluster_name: cluster.local
|
|
||||||
#
|
|
||||||
# set this variable to calico if needed. keep it empty if flannel is used
|
|
||||||
# overlay_network_plugin: calico
|
|
||||||
|
|
||||||
# Kubernetes internal network for services, unused block of space.
|
|
||||||
# kube_service_addresses: 10.233.0.0/18
|
|
||||||
|
|
||||||
# internal network. When used, it will assign IP
|
|
||||||
# addresses from this range to individual pods.
|
|
||||||
# This network must be unused in your network infrastructure!
|
|
||||||
# overlay_network_subnet: 10.233.64.0/18
|
|
||||||
|
|
||||||
# internal network total size (optional). This is the prefix of the
|
|
||||||
# entire overlay network. So the entirety of 4.0.0.0/16 must be
|
|
||||||
# unused in your environment.
|
|
||||||
# overlay_network_prefix: 18
|
|
||||||
|
|
||||||
# internal network node size allocation (optional). This is the size allocated
|
|
||||||
# to each node on your network. With these defaults you should have
|
|
||||||
# room for 4096 nodes with 254 pods per node.
|
|
||||||
# overlay_network_host_prefix: 24
|
|
||||||
|
|
||||||
# With calico it is possible to distributed routes with border routers of the datacenter.
|
|
||||||
# peer_with_router: false
|
|
||||||
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
|
||||||
# The subnets of each nodes will be distributed by the datacenter router
|
|
||||||
|
|
||||||
# Internal DNS configuration.
|
|
||||||
# Kubernetes can create and mainatain its own DNS server to resolve service names
|
|
||||||
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
|
||||||
# as it greatly simplifies configuration of your applications - you can use
|
|
||||||
# service names instead of magic environment variables.
|
|
||||||
# You still must manually configure all your containers to use this DNS server,
|
|
||||||
# Kubernetes won't do this for you (yet).
|
|
||||||
|
|
||||||
# Upstream dns servers used by dnsmasq
|
|
||||||
# upstream_dns_servers:
|
|
||||||
# - 8.8.8.8
|
|
||||||
# - 4.4.8.8
|
|
||||||
#
|
|
||||||
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
|
||||||
# dns_setup: true
|
|
||||||
# dns_domain: "{{ cluster_name }}"
|
|
||||||
#
|
|
||||||
# # Ip address of the kubernetes dns service
|
|
||||||
# dns_server: 10.233.0.10
|
|
||||||
@@ -1,17 +1,27 @@
|
|||||||
|
# Directory where the binaries will be installed
|
||||||
|
bin_dir: /usr/local/bin
|
||||||
|
|
||||||
|
# Where the binaries will be downloaded.
|
||||||
|
# Note: ensure that you've enough disk space (about 1G)
|
||||||
|
local_release_dir: "/tmp/releases"
|
||||||
|
|
||||||
|
# Cluster Loglevel configuration
|
||||||
|
kube_log_level: 2
|
||||||
|
|
||||||
# Users to create for basic auth in Kubernetes API via HTTP
|
# Users to create for basic auth in Kubernetes API via HTTP
|
||||||
kube_users:
|
kube_users:
|
||||||
kube:
|
kube:
|
||||||
pass: changeme
|
pass: changeme
|
||||||
role: admin
|
role: admin
|
||||||
root:
|
# root:
|
||||||
pass: changeme
|
# pass: changeme
|
||||||
role: admin
|
# role: admin
|
||||||
|
|
||||||
# Kubernetes cluster name, also will be used as DNS domain
|
# Kubernetes cluster name, also will be used as DNS domain
|
||||||
cluster_name: cluster.local
|
cluster_name: cluster.local
|
||||||
#
|
|
||||||
# set this variable to calico if needed. keep it empty if flannel is used
|
# set this variable to calico if needed. keep it empty if flannel is used
|
||||||
overlay_network_plugin: calico
|
kube_network_plugin: calico
|
||||||
|
|
||||||
# Kubernetes internal network for services, unused block of space.
|
# Kubernetes internal network for services, unused block of space.
|
||||||
kube_service_addresses: 10.233.0.0/18
|
kube_service_addresses: 10.233.0.0/18
|
||||||
@@ -19,23 +29,27 @@ kube_service_addresses: 10.233.0.0/18
|
|||||||
# internal network. When used, it will assign IP
|
# internal network. When used, it will assign IP
|
||||||
# addresses from this range to individual pods.
|
# addresses from this range to individual pods.
|
||||||
# This network must be unused in your network infrastructure!
|
# This network must be unused in your network infrastructure!
|
||||||
overlay_network_subnet: 10.233.64.0/18
|
kube_pods_subnet: 10.233.64.0/18
|
||||||
|
|
||||||
# internal network total size (optional). This is the prefix of the
|
# internal network total size (optional). This is the prefix of the
|
||||||
# entire overlay network. So the entirety of 4.0.0.0/16 must be
|
# entire network. Must be unused in your environment.
|
||||||
# unused in your environment.
|
# kube_network_prefix: 18
|
||||||
# overlay_network_prefix: 18
|
|
||||||
|
|
||||||
# internal network node size allocation (optional). This is the size allocated
|
# internal network node size allocation (optional). This is the size allocated
|
||||||
# to each node on your network. With these defaults you should have
|
# to each node on your network. With these defaults you should have
|
||||||
# room for 4096 nodes with 254 pods per node.
|
# room for 4096 nodes with 254 pods per node.
|
||||||
overlay_network_host_prefix: 24
|
kube_network_node_prefix: 24
|
||||||
|
|
||||||
# With calico it is possible to distributed routes with border routers of the datacenter.
|
# With calico it is possible to distributed routes with border routers of the datacenter.
|
||||||
peer_with_router: false
|
peer_with_router: false
|
||||||
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
||||||
# The subnets of each nodes will be distributed by the datacenter router
|
# The subnets of each nodes will be distributed by the datacenter router
|
||||||
|
|
||||||
|
# The port the API Server will be listening on.
|
||||||
|
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||||
|
kube_apiserver_port: 443 # (https)
|
||||||
|
kube_apiserver_insecure_port: 8080 # (http)
|
||||||
|
|
||||||
# Internal DNS configuration.
|
# Internal DNS configuration.
|
||||||
# Kubernetes can create and mainatain its own DNS server to resolve service names
|
# Kubernetes can create and mainatain its own DNS server to resolve service names
|
||||||
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
||||||
@@ -48,10 +62,25 @@ peer_with_router: false
|
|||||||
upstream_dns_servers:
|
upstream_dns_servers:
|
||||||
- 8.8.8.8
|
- 8.8.8.8
|
||||||
- 4.4.8.8
|
- 4.4.8.8
|
||||||
|
#
|
||||||
# Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
||||||
dns_setup: true
|
dns_setup: true
|
||||||
dns_domain: "{{ cluster_name }}"
|
dns_domain: "{{ cluster_name }}"
|
||||||
|
#
|
||||||
|
# # Ip address of the kubernetes dns service
|
||||||
|
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
||||||
|
|
||||||
# Ip address of the kubernetes dns service
|
# For multi masters architecture:
|
||||||
dns_server: 10.233.0.10
|
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
|
||||||
|
# This domain name will be inserted into the /etc/hosts file of all servers
|
||||||
|
# configuration example with haproxy :
|
||||||
|
# listen kubernetes-apiserver-https
|
||||||
|
# bind 10.99.0.21:8383
|
||||||
|
# option ssl-hello-chk
|
||||||
|
# mode tcp
|
||||||
|
# timeout client 3h
|
||||||
|
# timeout server 3h
|
||||||
|
# server master1 10.99.0.26:443
|
||||||
|
# server master2 10.99.0.27:443
|
||||||
|
# balance roundrobin
|
||||||
|
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
||||||
10
inventory/group_vars/new-york.yml
Normal file
10
inventory/group_vars/new-york.yml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
#---
|
||||||
|
#peers:
|
||||||
|
# -router_id: "10.99.0.34"
|
||||||
|
# as: "65xxx"
|
||||||
|
# - router_id: "10.99.0.35"
|
||||||
|
# as: "65xxx"
|
||||||
|
#
|
||||||
|
#loadbalancer_apiserver:
|
||||||
|
# address: "10.99.0.44"
|
||||||
|
# port: "8383"
|
||||||
10
inventory/group_vars/paris.yml
Normal file
10
inventory/group_vars/paris.yml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
#---
|
||||||
|
#peers:
|
||||||
|
# -router_id: "10.99.0.2"
|
||||||
|
# as: "65xxx"
|
||||||
|
# - router_id: "10.99.0.3"
|
||||||
|
# as: "65xxx"
|
||||||
|
#
|
||||||
|
#loadbalancer_apiserver:
|
||||||
|
# address: "10.99.0.21"
|
||||||
|
# port: "8383"
|
||||||
32
inventory/inventory.example
Normal file
32
inventory/inventory.example
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
[downloader]
|
||||||
|
localhost ansible_connection=local ansible_python_interpreter=python2
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node3 ansible_ssh_host=10.99.0.4
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node3 ansible_ssh_host=10.99.0.4
|
||||||
|
node4 ansible_ssh_host=10.99.0.5
|
||||||
|
node5 ansible_ssh_host=10.99.0.36
|
||||||
|
node6 ansible_ssh_host=10.99.0.37
|
||||||
|
|
||||||
|
[paris]
|
||||||
|
node1 ansible_ssh_host=10.99.0.26
|
||||||
|
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
|
||||||
|
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
|
||||||
|
|
||||||
|
[new-york]
|
||||||
|
node2 ansible_ssh_host=10.99.0.27
|
||||||
|
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
|
||||||
|
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
17
inventory/local-tests.cfg
Normal file
17
inventory/local-tests.cfg
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
|
||||||
|
|
||||||
|
[downloader]
|
||||||
|
node1
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
node1
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
node1
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
node1
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
@@ -1,19 +1,19 @@
|
|||||||
---
|
---
|
||||||
- src: https://github.com/ansibl8s/k8s-common.git
|
- src: https://github.com/ansibl8s/k8s-common.git
|
||||||
path: roles/apps
|
path: roles/apps
|
||||||
# version: v1.0
|
version: v1.0
|
||||||
|
|
||||||
- src: https://github.com/ansibl8s/k8s-skydns.git
|
- src: https://github.com/ansibl8s/k8s-kubedns.git
|
||||||
path: roles/apps
|
path: roles/apps
|
||||||
# version: v1.0
|
version: v1.0
|
||||||
|
|
||||||
#- src: https://github.com/ansibl8s/k8s-kube-ui.git
|
#- src: https://github.com/ansibl8s/k8s-kube-ui.git
|
||||||
# path: roles/apps
|
# path: roles/apps
|
||||||
# # version: v1.0
|
# version: v1.0
|
||||||
#
|
#
|
||||||
#- src: https://github.com/ansibl8s/k8s-fabric8.git
|
#- src: https://github.com/ansibl8s/k8s-fabric8.git
|
||||||
# path: roles/apps
|
# path: roles/apps
|
||||||
# # version: v1.0
|
# version: v1.0
|
||||||
#
|
#
|
||||||
#- src: https://github.com/ansibl8s/k8s-elasticsearch.git
|
#- src: https://github.com/ansibl8s/k8s-elasticsearch.git
|
||||||
# path: roles/apps
|
# path: roles/apps
|
||||||
@@ -25,12 +25,17 @@
|
|||||||
#
|
#
|
||||||
#- src: https://github.com/ansibl8s/k8s-memcached.git
|
#- src: https://github.com/ansibl8s/k8s-memcached.git
|
||||||
# path: roles/apps
|
# path: roles/apps
|
||||||
# # version: v1.0
|
# version: v1.0
|
||||||
#
|
|
||||||
#- src: https://github.com/ansibl8s/k8s-haproxy.git
|
|
||||||
# path: roles/apps
|
|
||||||
# # version: v1.0
|
|
||||||
#
|
#
|
||||||
#- src: https://github.com/ansibl8s/k8s-postgres.git
|
#- src: https://github.com/ansibl8s/k8s-postgres.git
|
||||||
# path: roles/apps
|
# path: roles/apps
|
||||||
# # version: v1.0
|
# version: v1.0
|
||||||
|
#
|
||||||
|
#- src: https://github.com/ansibl8s/k8s-heapster.git
|
||||||
|
# path: roles/apps
|
||||||
|
#
|
||||||
|
#- src: https://github.com/ansibl8s/k8s-influxdb.git
|
||||||
|
# path: roles/apps
|
||||||
|
#
|
||||||
|
#- src: https://github.com/ansibl8s/k8s-kubedash.git
|
||||||
|
# path: roles/apps
|
||||||
|
|||||||
Submodule roles/apps/k8s-common updated: 1b0318421f...c69c5f881f
Submodule roles/apps/k8s-elasticsearch updated: 2de264f007...3d74c70a4a
1
roles/apps/k8s-etcd
Submodule
1
roles/apps/k8s-etcd
Submodule
Submodule roles/apps/k8s-etcd added at abd61ee91a
Submodule roles/apps/k8s-haproxy deleted from c17312c4df
1
roles/apps/k8s-heapster
Submodule
1
roles/apps/k8s-heapster
Submodule
Submodule roles/apps/k8s-heapster added at 44a6519bf8
1
roles/apps/k8s-influxdb
Submodule
1
roles/apps/k8s-influxdb
Submodule
Submodule roles/apps/k8s-influxdb added at 38d54c48e7
1
roles/apps/k8s-kube-logstash
Submodule
1
roles/apps/k8s-kube-logstash
Submodule
Submodule roles/apps/k8s-kube-logstash added at 256fa156e4
1
roles/apps/k8s-kubedash
Submodule
1
roles/apps/k8s-kubedash
Submodule
Submodule roles/apps/k8s-kubedash added at 64385696a9
Submodule roles/apps/k8s-kubedns updated: 382e1d8bfc...b5015aed8f
Submodule roles/apps/k8s-memcached updated: aad14ddd99...563b35f3b6
Submodule roles/apps/k8s-postgres updated: 582f8ec9f8...e219c91391
1
roles/apps/k8s-rabbitmq
Submodule
1
roles/apps/k8s-rabbitmq
Submodule
Submodule roles/apps/k8s-rabbitmq added at b91f96bb9c
Submodule roles/apps/k8s-redis updated: 86495a2152...a4e134fef3
@@ -1,3 +0,0 @@
|
|||||||
---
|
|
||||||
- name: restart dnsmasq
|
|
||||||
command: systemctl restart dnsmasq
|
|
||||||
@@ -5,54 +5,97 @@
|
|||||||
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
|
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
|
||||||
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
|
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
|
||||||
state: present
|
state: present
|
||||||
|
backup: yes
|
||||||
when: hostvars[item].ansible_default_ipv4.address is defined
|
when: hostvars[item].ansible_default_ipv4.address is defined
|
||||||
with_items: groups['all']
|
with_items: groups['all']
|
||||||
|
|
||||||
|
- name: populate kubernetes loadbalancer address into hosts file
|
||||||
|
lineinfile:
|
||||||
|
dest: /etc/hosts
|
||||||
|
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
|
||||||
|
line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local"
|
||||||
|
state: present
|
||||||
|
backup: yes
|
||||||
|
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
|
||||||
|
|
||||||
- name: clean hosts file
|
- name: clean hosts file
|
||||||
lineinfile:
|
lineinfile:
|
||||||
dest: /etc/hosts
|
dest: /etc/hosts
|
||||||
regexp: "{{ item }}"
|
regexp: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
backup: yes
|
||||||
with_items:
|
with_items:
|
||||||
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
|
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
|
||||||
- '^::1(\s+){{ inventory_hostname }}.*'
|
- '^::1(\s+){{ inventory_hostname }}.*'
|
||||||
|
|
||||||
- name: install dnsmasq and bindr9utils
|
|
||||||
apt:
|
|
||||||
name: "{{ item }}"
|
|
||||||
state: present
|
|
||||||
with_items:
|
|
||||||
- dnsmasq
|
|
||||||
- bind9utils
|
|
||||||
when: inventory_hostname in groups['kube-master'][0]
|
|
||||||
|
|
||||||
- name: ensure dnsmasq.d directory exists
|
- name: ensure dnsmasq.d directory exists
|
||||||
file:
|
file:
|
||||||
path: /etc/dnsmasq.d
|
path: /etc/dnsmasq.d
|
||||||
state: directory
|
state: directory
|
||||||
when: inventory_hostname in groups['kube-master'][0]
|
when: inventory_hostname in groups['kube-master']
|
||||||
|
|
||||||
- name: configure dnsmasq
|
- name: configure dnsmasq
|
||||||
template:
|
template:
|
||||||
src: 01-kube-dns.conf.j2
|
src: 01-kube-dns.conf.j2
|
||||||
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
||||||
mode: 755
|
mode: 755
|
||||||
notify:
|
backup: yes
|
||||||
- restart dnsmasq
|
when: inventory_hostname in groups['kube-master']
|
||||||
when: inventory_hostname in groups['kube-master'][0]
|
|
||||||
|
|
||||||
- name: enable dnsmasq
|
- name: create dnsmasq pod template
|
||||||
service:
|
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
|
||||||
name: dnsmasq
|
when: inventory_hostname in groups['kube-master']
|
||||||
state: started
|
|
||||||
enabled: yes
|
|
||||||
when: inventory_hostname in groups['kube-master'][0]
|
|
||||||
|
|
||||||
- name: update resolv.conf with new DNS setup
|
- name: Check for dnsmasq port
|
||||||
template:
|
wait_for:
|
||||||
src: resolv.conf.j2
|
port: 53
|
||||||
dest: /etc/resolv.conf
|
delay: 5
|
||||||
mode: 644
|
timeout: 100
|
||||||
|
when: inventory_hostname in groups['kube-master']
|
||||||
|
|
||||||
|
- name: check resolvconf
|
||||||
|
stat: path=/etc/resolvconf/resolv.conf.d/head
|
||||||
|
register: resolvconf
|
||||||
|
|
||||||
|
- name: target resolv.conf file
|
||||||
|
set_fact:
|
||||||
|
resolvconffile: >
|
||||||
|
{%- if resolvconf.stat.exists == True -%}
|
||||||
|
/etc/resolvconf/resolv.conf.d/head
|
||||||
|
{%- else -%}
|
||||||
|
/etc/resolv.conf
|
||||||
|
{%- endif -%}
|
||||||
|
|
||||||
|
- name: Add search resolv.conf
|
||||||
|
lineinfile:
|
||||||
|
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
|
||||||
|
dest: "{{resolvconffile}}"
|
||||||
|
state: present
|
||||||
|
insertafter: EOF
|
||||||
|
backup: yes
|
||||||
|
follow: yes
|
||||||
|
|
||||||
|
- name: Add all masters as nameserver
|
||||||
|
lineinfile:
|
||||||
|
line: nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }}
|
||||||
|
dest: "{{resolvconffile}}"
|
||||||
|
state: present
|
||||||
|
insertafter: EOF
|
||||||
|
backup: yes
|
||||||
|
follow: yes
|
||||||
|
with_items: groups['kube-master']
|
||||||
|
|
||||||
- name: disable resolv.conf modification by dhclient
|
- name: disable resolv.conf modification by dhclient
|
||||||
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x
|
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x backup=yes
|
||||||
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
|
- name: disable resolv.conf modification by dhclient
|
||||||
|
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
|
||||||
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
|
- name: update resolvconf
|
||||||
|
command: resolvconf -u
|
||||||
|
changed_when: False
|
||||||
|
when: resolvconf.stat.exists == True
|
||||||
|
|
||||||
|
- meta: flush_handlers
|
||||||
|
|||||||
49
roles/dnsmasq/templates/dnsmasq-pod.yml
Normal file
49
roles/dnsmasq/templates/dnsmasq-pod.yml
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: dnsmasq
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: dnsmasq
|
||||||
|
image: andyshinn/dnsmasq:2.72
|
||||||
|
command:
|
||||||
|
- dnsmasq
|
||||||
|
args:
|
||||||
|
- -k
|
||||||
|
- "-7"
|
||||||
|
- /etc/dnsmasq.d
|
||||||
|
- --local-service
|
||||||
|
securityContext:
|
||||||
|
capabilities:
|
||||||
|
add:
|
||||||
|
- NET_ADMIN
|
||||||
|
imagePullPolicy: Always
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 256M
|
||||||
|
ports:
|
||||||
|
- name: dns
|
||||||
|
containerPort: 53
|
||||||
|
hostPort: 53
|
||||||
|
protocol: UDP
|
||||||
|
- name: dns-tcp
|
||||||
|
containerPort: 53
|
||||||
|
hostPort: 53
|
||||||
|
protocol: TCP
|
||||||
|
volumeMounts:
|
||||||
|
- name: etcdnsmasqd
|
||||||
|
mountPath: /etc/dnsmasq.d
|
||||||
|
- name: etcdnsmasqdavailable
|
||||||
|
mountPath: /etc/dnsmasq.d-available
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
- name: etcdnsmasqd
|
||||||
|
hostPath:
|
||||||
|
path: /etc/dnsmasq.d
|
||||||
|
- name: etcdnsmasqdavailable
|
||||||
|
hostPath:
|
||||||
|
path: /etc/dnsmasq.d-available
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
; generated by ansible
|
|
||||||
search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
|
|
||||||
{% for host in groups['kube-master'] %}
|
|
||||||
nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
|
|
||||||
{% endfor %}
|
|
||||||
2
roles/docker/.gitignore
vendored
Normal file
2
roles/docker/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
.*.swp
|
||||||
|
.vagrant
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Docker Application Container Engine
|
|
||||||
Documentation=https://docs.docker.com
|
|
||||||
After=network.target docker.socket
|
|
||||||
Requires=docker.socket
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
EnvironmentFile=-/etc/default/docker
|
|
||||||
Type=notify
|
|
||||||
ExecStart=/usr/bin/docker daemon -H fd:// $DOCKER_OPTS
|
|
||||||
MountFlags=slave
|
|
||||||
LimitNOFILE=1048576
|
|
||||||
LimitNPROC=1048576
|
|
||||||
LimitCORE=infinity
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
- name: restart docker
|
|
||||||
command: /bin/true
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart docker service
|
|
||||||
|
|
||||||
- name: reload systemd
|
|
||||||
shell: systemctl daemon-reload
|
|
||||||
|
|
||||||
- name: restart docker service
|
|
||||||
service: name=docker state=restarted
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Write script for calico/docker bridge configuration
|
|
||||||
template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
|
|
||||||
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
|
|
||||||
|
|
||||||
- name: Configure calico/docker bridge
|
|
||||||
shell: /etc/network/if-up.d/create_cbr
|
|
||||||
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
|
|
||||||
|
|
||||||
- name: Configure docker to use cbr0 bridge
|
|
||||||
lineinfile:
|
|
||||||
dest=/etc/default/docker
|
|
||||||
regexp='.*DOCKER_OPTS=.*'
|
|
||||||
line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
|
|
||||||
notify:
|
|
||||||
- restart docker
|
|
||||||
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
|
|
||||||
|
|
||||||
- name: enable docker
|
|
||||||
service:
|
|
||||||
name: docker
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
- meta: flush_handlers
|
|
||||||
|
|
||||||
#- name: login to arkena's docker registry
|
|
||||||
# shell : >
|
|
||||||
# docker login --username={{ dockerhub_user }}
|
|
||||||
# --password={{ dockerhub_pass }}
|
|
||||||
# --email={{ dockerhub_email }}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Install prerequisites for https transport
|
|
||||||
apt: pkg={{ item }} state=present update_cache=yes
|
|
||||||
with_items:
|
|
||||||
- apt-transport-https
|
|
||||||
- ca-certificates
|
|
||||||
|
|
||||||
- name: Configure docker apt repository
|
|
||||||
template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
|
|
||||||
|
|
||||||
- name: Install docker-engine
|
|
||||||
apt: pkg={{ item }} state=present force=yes update_cache=yes
|
|
||||||
with_items:
|
|
||||||
- aufs-tools
|
|
||||||
- cgroupfs-mount
|
|
||||||
- docker-engine=1.8.2-0~{{ ansible_distribution_release }}
|
|
||||||
|
|
||||||
- name: Copy default docker configuration
|
|
||||||
template: src=default-docker.j2 dest=/etc/default/docker
|
|
||||||
notify: restart docker
|
|
||||||
|
|
||||||
- name: Copy Docker systemd unit file
|
|
||||||
copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
|
|
||||||
notify: restart docker
|
|
||||||
@@ -1,3 +1,53 @@
|
|||||||
---
|
---
|
||||||
- include: install.yml
|
- name: gather os specific variables
|
||||||
- include: configure.yml
|
include_vars: "{{ item }}"
|
||||||
|
with_first_found:
|
||||||
|
- files:
|
||||||
|
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
|
||||||
|
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
|
||||||
|
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
|
||||||
|
- "{{ ansible_distribution|lower }}.yml"
|
||||||
|
- "{{ ansible_os_family|lower }}.yml"
|
||||||
|
- defaults.yml
|
||||||
|
paths:
|
||||||
|
- ../vars
|
||||||
|
|
||||||
|
- name: check for minimum kernel version
|
||||||
|
fail:
|
||||||
|
msg: >
|
||||||
|
docker requires a minimum kernel version of
|
||||||
|
{{ docker_kernel_min_version }} on
|
||||||
|
{{ ansible_distribution }}-{{ ansible_distribution_version }}
|
||||||
|
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
|
||||||
|
|
||||||
|
- name: ensure docker requirements packages are installed
|
||||||
|
action: "{{ docker_package_info.pkg_mgr }}"
|
||||||
|
args: docker_package_info.args
|
||||||
|
with_items: docker_package_info.pre_pkgs
|
||||||
|
when: docker_package_info.pre_pkgs|length > 0
|
||||||
|
|
||||||
|
- name: ensure docker repository public key is installed
|
||||||
|
action: "{{ docker_repo_key_info.pkg_key }}"
|
||||||
|
args: docker_repo_key_info.args
|
||||||
|
with_items: docker_repo_key_info.repo_keys
|
||||||
|
when: docker_repo_key_info.repo_keys|length > 0
|
||||||
|
|
||||||
|
- name: ensure docker repository is enabled
|
||||||
|
action: "{{ docker_repo_info.pkg_repo }}"
|
||||||
|
args: docker_repo_info.args
|
||||||
|
with_items: docker_repo_info.repos
|
||||||
|
when: docker_repo_info.repos|length > 0
|
||||||
|
|
||||||
|
- name: ensure docker packages are installed
|
||||||
|
action: "{{ docker_package_info.pkg_mgr }}"
|
||||||
|
args: docker_package_info.args
|
||||||
|
with_items: docker_package_info.pkgs
|
||||||
|
when: docker_package_info.pkgs|length > 0
|
||||||
|
|
||||||
|
- name: ensure docker service is started and enabled
|
||||||
|
service:
|
||||||
|
name: "{{ item }}"
|
||||||
|
enabled: yes
|
||||||
|
state: started
|
||||||
|
with_items:
|
||||||
|
- docker
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Create calico bridge cbr0 if it doesn't exist
|
|
||||||
ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
|
|
||||||
if ! [[ "${ifaces}" =~ "cbr0" ]];then
|
|
||||||
brctl addbr cbr0
|
|
||||||
ip link set cbr0 up
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Configure calico bridge ip
|
|
||||||
br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
|
|
||||||
if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
|
|
||||||
ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
|
|
||||||
fi
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
# Docker Upstart and SysVinit configuration file
|
|
||||||
|
|
||||||
# Customize location of Docker binary (especially for development testing).
|
|
||||||
#DOCKER="/usr/local/bin/docker"
|
|
||||||
|
|
||||||
# Use DOCKER_OPTS to modify the daemon startup options.
|
|
||||||
{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
|
|
||||||
DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# If you need Docker to use an HTTP proxy, it can also be specified here.
|
|
||||||
#export http_proxy="http://127.0.0.1:3128/"
|
|
||||||
|
|
||||||
# This is also a handy place to tweak where Docker's temporary files go.
|
|
||||||
#export TMPDIR="/mnt/bigdrive/docker-tmp"
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main
|
|
||||||
24
roles/docker/vars/centos-6.yml
Normal file
24
roles/docker/vars/centos-6.yml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
docker_kernel_min_version: '2.6.32-431'
|
||||||
|
|
||||||
|
docker_package_info:
|
||||||
|
pkg_mgr: yum
|
||||||
|
args:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
pre_pkgs:
|
||||||
|
- epel-release
|
||||||
|
- curl
|
||||||
|
- device-mapper-libs
|
||||||
|
pkgs:
|
||||||
|
- docker-io
|
||||||
|
|
||||||
|
docker_repo_key_info:
|
||||||
|
pkg_key: ''
|
||||||
|
args: {}
|
||||||
|
repo_keys: []
|
||||||
|
|
||||||
|
docker_repo_info:
|
||||||
|
pkg_repo: ''
|
||||||
|
args: {}
|
||||||
|
repos: []
|
||||||
36
roles/docker/vars/debian.yml
Normal file
36
roles/docker/vars/debian.yml
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
docker_kernel_min_version: '3.2'
|
||||||
|
|
||||||
|
docker_package_info:
|
||||||
|
pkg_mgr: apt
|
||||||
|
args:
|
||||||
|
pkg: "{{ item }}"
|
||||||
|
update_cache: yes
|
||||||
|
cache_valid_time: 600
|
||||||
|
state: latest
|
||||||
|
pre_pkgs:
|
||||||
|
- apt-transport-https
|
||||||
|
- curl
|
||||||
|
- software-properties-common
|
||||||
|
pkgs:
|
||||||
|
- docker-engine
|
||||||
|
|
||||||
|
docker_repo_key_info:
|
||||||
|
pkg_key: apt_key
|
||||||
|
args:
|
||||||
|
id: "{{ item }}"
|
||||||
|
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||||
|
state: present
|
||||||
|
repo_keys:
|
||||||
|
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||||
|
|
||||||
|
docker_repo_info:
|
||||||
|
pkg_repo: apt_repository
|
||||||
|
args:
|
||||||
|
repo: "{{ item }}"
|
||||||
|
update_cache: yes
|
||||||
|
state: present
|
||||||
|
repos:
|
||||||
|
- >
|
||||||
|
deb https://apt.dockerproject.org/repo
|
||||||
|
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||||
|
main
|
||||||
22
roles/docker/vars/fedora-20.yml
Normal file
22
roles/docker/vars/fedora-20.yml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
docker_kernel_min_version: '0'
|
||||||
|
|
||||||
|
docker_package_info:
|
||||||
|
pkg_mgr: yum
|
||||||
|
args:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
pre_pkgs:
|
||||||
|
- curl
|
||||||
|
pkgs:
|
||||||
|
- docker-io
|
||||||
|
|
||||||
|
docker_repo_key_info:
|
||||||
|
pkg_key: ''
|
||||||
|
args: {}
|
||||||
|
repo_keys: []
|
||||||
|
|
||||||
|
docker_repo_info:
|
||||||
|
pkg_repo: ''
|
||||||
|
args: {}
|
||||||
|
repos: []
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
---
|
|
||||||
#dockerhub_user:
|
|
||||||
#dockerhub_pass:
|
|
||||||
#dockerhub_email:
|
|
||||||
22
roles/docker/vars/redhat.yml
Normal file
22
roles/docker/vars/redhat.yml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
docker_kernel_min_version: '0'
|
||||||
|
|
||||||
|
docker_package_info:
|
||||||
|
pkg_mgr: yum
|
||||||
|
args:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
pre_pkgs:
|
||||||
|
- curl
|
||||||
|
pkgs:
|
||||||
|
- docker
|
||||||
|
|
||||||
|
docker_repo_key_info:
|
||||||
|
pkg_key: ''
|
||||||
|
args: {}
|
||||||
|
repo_keys: []
|
||||||
|
|
||||||
|
docker_repo_info:
|
||||||
|
pkg_repo: ''
|
||||||
|
args: {}
|
||||||
|
repos: []
|
||||||
@@ -1,5 +1,42 @@
|
|||||||
---
|
---
|
||||||
etcd_download_url: https://github.com/coreos/etcd/releases/download
|
local_release_dir: /tmp
|
||||||
flannel_download_url: https://github.com/coreos/flannel/releases/download
|
|
||||||
kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
|
flannel_version: 0.5.5
|
||||||
calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
|
calico_version: v0.13.0
|
||||||
|
calico_plugin_version: v0.7.0
|
||||||
|
kube_version: v1.1.3
|
||||||
|
|
||||||
|
kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d"
|
||||||
|
kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482"
|
||||||
|
|
||||||
|
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
|
||||||
|
|
||||||
|
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
|
||||||
|
|
||||||
|
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
|
||||||
|
|
||||||
|
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
|
||||||
|
|
||||||
|
downloads:
|
||||||
|
- name: calico
|
||||||
|
dest: calico/bin/calicoctl
|
||||||
|
url: "{{calico_download_url}}"
|
||||||
|
|
||||||
|
- name: calico-plugin
|
||||||
|
dest: calico/bin/calico
|
||||||
|
url: "{{calico_plugin_download_url}}"
|
||||||
|
|
||||||
|
- name: flannel
|
||||||
|
dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
|
||||||
|
url: "{{flannel_download_url}}"
|
||||||
|
unarchive: yes
|
||||||
|
|
||||||
|
- name: kubernetes-kubelet
|
||||||
|
dest: kubernetes/bin/kubelet
|
||||||
|
sha256: "{{kubelet_checksum}}"
|
||||||
|
url: "{{ kube_download_url }}/kubelet"
|
||||||
|
|
||||||
|
- name: kubernetes-kubectl
|
||||||
|
dest: kubernetes/bin/kubectl
|
||||||
|
sha256: "{{kubectl_checksum}}"
|
||||||
|
url: "{{ kube_download_url }}/kubectl"
|
||||||
|
|||||||
@@ -1,21 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create calico release directory
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/calico/bin
|
|
||||||
recurse=yes
|
|
||||||
state=directory
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Check if calicoctl has been downloaded
|
|
||||||
local_action: stat
|
|
||||||
path={{ local_release_dir }}/calico/bin/calicoctl
|
|
||||||
register: c_tar
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
# issues with get_url module and redirects, to be tested again in the near future
|
|
||||||
- name: Download calico
|
|
||||||
local_action: shell
|
|
||||||
curl -o {{ local_release_dir }}/calico/bin/calicoctl -Ls {{ calico_download_url }}/{{ calico_version }}/calicoctl
|
|
||||||
when: not c_tar.stat.exists
|
|
||||||
register: dl_calico
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create etcd release directory
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/etcd/bin
|
|
||||||
recurse=yes
|
|
||||||
state=directory
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Check if etcd release archive has been downloaded
|
|
||||||
local_action: stat
|
|
||||||
path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
|
|
||||||
register: e_tar
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
# issues with get_url module and redirects, to be tested again in the near future
|
|
||||||
- name: Download etcd
|
|
||||||
local_action: shell
|
|
||||||
curl -o {{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz -Ls {{ etcd_download_url }}/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
|
|
||||||
when: not e_tar.stat.exists
|
|
||||||
register: dl_etcd
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Extract etcd archive
|
|
||||||
local_action: unarchive
|
|
||||||
src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
|
|
||||||
dest={{ local_release_dir }}/etcd copy=no
|
|
||||||
when: dl_etcd|changed
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Pick up only etcd binaries
|
|
||||||
local_action: copy
|
|
||||||
src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/{{ item }}
|
|
||||||
dest={{ local_release_dir }}/etcd/bin
|
|
||||||
with_items:
|
|
||||||
- etcdctl
|
|
||||||
- etcd
|
|
||||||
when: dl_etcd|changed
|
|
||||||
|
|
||||||
- name: Delete unused etcd files
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64 state=absent
|
|
||||||
when: dl_etcd|changed
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create flannel release directory
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/flannel
|
|
||||||
recurse=yes
|
|
||||||
state=directory
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Check if flannel release archive has been downloaded
|
|
||||||
local_action: stat
|
|
||||||
path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
|
|
||||||
register: f_tar
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
# issues with get_url module and redirects, to be tested again in the near future
|
|
||||||
- name: Download flannel
|
|
||||||
local_action: shell
|
|
||||||
curl -o {{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz -Ls {{ flannel_download_url }}/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz
|
|
||||||
when: not f_tar.stat.exists
|
|
||||||
register: dl_flannel
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Extract flannel archive
|
|
||||||
local_action: unarchive
|
|
||||||
src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
|
|
||||||
dest={{ local_release_dir }}/flannel copy=no
|
|
||||||
when: dl_flannel|changed
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Pick up only flannel binaries
|
|
||||||
local_action: copy
|
|
||||||
src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}/flanneld
|
|
||||||
dest={{ local_release_dir }}/flannel/bin
|
|
||||||
when: dl_flannel|changed
|
|
||||||
|
|
||||||
- name: Delete unused flannel files
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }} state=absent
|
|
||||||
when: dl_flannel|changed
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create kubernetes release directory
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/kubernetes
|
|
||||||
state=directory
|
|
||||||
|
|
||||||
- name: Check if kubernetes release archive has been downloaded
|
|
||||||
local_action: stat
|
|
||||||
path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
|
|
||||||
register: k_tar
|
|
||||||
|
|
||||||
# issues with get_url module and redirects, to be tested again in the near future
|
|
||||||
- name: Download kubernetes
|
|
||||||
local_action: shell
|
|
||||||
curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
|
|
||||||
when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
|
|
||||||
register: dl_kube
|
|
||||||
|
|
||||||
- name: Compare kubernetes archive checksum
|
|
||||||
local_action: stat
|
|
||||||
path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
|
|
||||||
register: k_tar
|
|
||||||
failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
|
|
||||||
when: dl_kube|changed
|
|
||||||
|
|
||||||
- name: Extract kubernetes archive
|
|
||||||
local_action: unarchive
|
|
||||||
src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
|
|
||||||
dest={{ local_release_dir }}/kubernetes copy=no
|
|
||||||
when: dl_kube|changed
|
|
||||||
|
|
||||||
- name: Extract kubernetes binaries archive
|
|
||||||
local_action: unarchive
|
|
||||||
src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
|
|
||||||
dest={{ local_release_dir }}/kubernetes copy=no
|
|
||||||
when: dl_kube|changed
|
|
||||||
|
|
||||||
- name: Pick up only kubernetes binaries
|
|
||||||
local_action: synchronize
|
|
||||||
src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
|
|
||||||
dest={{ local_release_dir }}/kubernetes
|
|
||||||
when: dl_kube|changed
|
|
||||||
|
|
||||||
- name: Delete unused kubernetes files
|
|
||||||
local_action: file
|
|
||||||
path={{ local_release_dir }}/kubernetes/kubernetes state=absent
|
|
||||||
when: dl_kube|changed
|
|
||||||
@@ -1,5 +1,19 @@
|
|||||||
---
|
---
|
||||||
- include: kubernetes.yml
|
- name: Create dest directories
|
||||||
- include: etcd.yml
|
file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
|
||||||
- include: calico.yml
|
with_items: downloads
|
||||||
- include: flannel.yml
|
|
||||||
|
- name: Download items
|
||||||
|
get_url:
|
||||||
|
url: "{{item.url}}"
|
||||||
|
dest: "{{local_release_dir}}/{{item.dest}}"
|
||||||
|
sha256sum: "{{item.sha256 | default(omit)}}"
|
||||||
|
with_items: downloads
|
||||||
|
|
||||||
|
- name: Extract archives
|
||||||
|
unarchive:
|
||||||
|
src: "{{ local_release_dir }}/{{item.dest}}"
|
||||||
|
dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
|
||||||
|
copy: no
|
||||||
|
when: "{{item.unarchive is defined and item.unarchive == True}}"
|
||||||
|
with_items: downloads
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
etcd_version: v2.2.0
|
|
||||||
flannel_version: 0.5.3
|
|
||||||
|
|
||||||
kube_version: v1.0.6
|
|
||||||
kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c
|
|
||||||
|
|
||||||
calico_version: v0.5.1
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
- name: restart daemons
|
|
||||||
command: /bin/true
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart etcd2
|
|
||||||
|
|
||||||
- name: reload systemd
|
|
||||||
command: systemctl daemon-reload
|
|
||||||
|
|
||||||
- name: restart etcd2
|
|
||||||
service: name=etcd2 state=restarted
|
|
||||||
|
|
||||||
- name: Save iptables rules
|
|
||||||
command: service iptables save
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Disable ferm
|
|
||||||
service: name=ferm state=stopped enabled=no
|
|
||||||
|
|
||||||
- name: Create etcd2 environment vars dir
|
|
||||||
file: path=/etc/systemd/system/etcd2.service.d state=directory
|
|
||||||
|
|
||||||
- name: Write etcd2 config file
|
|
||||||
template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart etcd2
|
|
||||||
|
|
||||||
- name: Ensure etcd2 is running
|
|
||||||
service: name=etcd2 state=started enabled=yes
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create etcd user
|
|
||||||
user: name=etcd shell=/bin/nologin home=/var/lib/etcd2
|
|
||||||
|
|
||||||
- name: Install etcd binaries
|
|
||||||
copy:
|
|
||||||
src={{ local_release_dir }}/etcd/bin/{{ item }}
|
|
||||||
dest={{ bin_dir }}
|
|
||||||
owner=etcd
|
|
||||||
mode=u+x
|
|
||||||
with_items:
|
|
||||||
- etcdctl
|
|
||||||
- etcd
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- name: Create etcd2 binary symlink
|
|
||||||
file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link
|
|
||||||
|
|
||||||
- name: Copy etcd2.service systemd file
|
|
||||||
template:
|
|
||||||
src: systemd-etcd2.service.j2
|
|
||||||
dest: /lib/systemd/system/etcd2.service
|
|
||||||
notify: restart daemons
|
|
||||||
@@ -1,3 +1,13 @@
|
|||||||
---
|
---
|
||||||
- include: install.yml
|
- name: ETCD2 | Stop etcd2 service
|
||||||
- include: configure.yml
|
service: name=etcd state=stopped
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- name: ETCD2 | create etcd pod template
|
||||||
|
template: src=etcd-pod.yml dest=/etc/kubernetes/manifests/etcd-pod.manifest
|
||||||
|
|
||||||
|
- name: ETCD2 | Check for etcd2 port
|
||||||
|
wait_for:
|
||||||
|
port: 2379
|
||||||
|
delay: 5
|
||||||
|
timeout: 100
|
||||||
|
|||||||
54
roles/etcd/templates/etcd-pod.yml
Normal file
54
roles/etcd/templates/etcd-pod.yml
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: etcd
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: etcd
|
||||||
|
image: quay.io/coreos/etcd:v2.2.2
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 256M
|
||||||
|
args:
|
||||||
|
{% if inventory_hostname in groups['etcd'] %}
|
||||||
|
- --name
|
||||||
|
- etcd-{{inventory_hostname}}-master
|
||||||
|
- --advertise-client-urls
|
||||||
|
- "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
|
||||||
|
- --listen-peer-urls
|
||||||
|
- http://0.0.0.0:2380
|
||||||
|
- --initial-advertise-peer-urls
|
||||||
|
- http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380
|
||||||
|
- --data-dir
|
||||||
|
- /var/etcd/data
|
||||||
|
- --initial-cluster-state
|
||||||
|
- new
|
||||||
|
{% else %}
|
||||||
|
- --proxy
|
||||||
|
- 'on'
|
||||||
|
{% endif %}
|
||||||
|
- --listen-client-urls
|
||||||
|
- "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"
|
||||||
|
- --initial-cluster
|
||||||
|
- "{% for host in groups['etcd'] %}etcd-{{host}}-master=http://{{ hostvars[host]['ip'] | default( hostvars[host]['ansible_default_ipv4']['address']) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
|
||||||
|
- --initial-cluster-token
|
||||||
|
- etcd-k8s-cluster
|
||||||
|
ports:
|
||||||
|
- name: etcd-client
|
||||||
|
containerPort: 2379
|
||||||
|
hostPort: 2379
|
||||||
|
- name: etcd-peer
|
||||||
|
containerPort: 2380
|
||||||
|
hostPort: 2380
|
||||||
|
volumeMounts:
|
||||||
|
- name: varetcd
|
||||||
|
mountPath: /var/etcd
|
||||||
|
readOnly: false
|
||||||
|
volumes:
|
||||||
|
- name: varetcd
|
||||||
|
hostPath:
|
||||||
|
path: /containers/pods/etcd-{{inventory_hostname}}/rootfs/var/etcd
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
# etcd2.0
|
|
||||||
[Service]
|
|
||||||
{% if inventory_hostname in groups['kube-master'] %}
|
|
||||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
|
|
||||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
|
|
||||||
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
|
|
||||||
Environment="ETCD_INITIAL_CLUSTER_STATE=new"
|
|
||||||
Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
|
|
||||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
|
|
||||||
Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
|
|
||||||
Environment="ETCD_NAME=master"
|
|
||||||
{% else %}
|
|
||||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
|
|
||||||
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
|
|
||||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
|
|
||||||
Environment="ETCD_PROXY=on"
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=etcd2
|
|
||||||
Conflicts=etcd.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
User=etcd
|
|
||||||
Environment=ETCD_DATA_DIR=/var/lib/etcd2
|
|
||||||
Environment=ETCD_NAME=%m
|
|
||||||
ExecStart={{ bin_dir }}/etcd2
|
|
||||||
Restart=always
|
|
||||||
RestartSec=10s
|
|
||||||
LimitNOFILE=40000
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,115 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright 2014 The Kubernetes Authors All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
set -o errexit
|
|
||||||
set -o nounset
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
# Caller should set in the ev:
|
|
||||||
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
|
|
||||||
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
|
|
||||||
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
|
|
||||||
# MASTER_NAME - I'm not sure what it is...
|
|
||||||
|
|
||||||
# Also the following will be respected
|
|
||||||
# CERT_DIR - where to place the finished certs
|
|
||||||
# CERT_GROUP - who the group owner of the cert files should be
|
|
||||||
|
|
||||||
cert_ip="${MASTER_IP:="${1}"}"
|
|
||||||
master_name="${MASTER_NAME:="kubernetes"}"
|
|
||||||
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
|
|
||||||
dns_domain="${DNS_DOMAIN:="cluster.local"}"
|
|
||||||
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
|
|
||||||
cert_group="${CERT_GROUP:="kube-cert"}"
|
|
||||||
|
|
||||||
# The following certificate pairs are created:
|
|
||||||
#
|
|
||||||
# - ca (the cluster's certificate authority)
|
|
||||||
# - server
|
|
||||||
# - kubelet
|
|
||||||
# - kubecfg (for kubectl)
|
|
||||||
#
|
|
||||||
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
|
|
||||||
# the certs that we need.
|
|
||||||
|
|
||||||
# TODO: Add support for discovery on other providers?
|
|
||||||
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
|
|
||||||
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
|
|
||||||
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
|
|
||||||
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
|
|
||||||
fi
|
|
||||||
|
|
||||||
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
|
|
||||||
trap 'rm -rf "${tmpdir}"' EXIT
|
|
||||||
cd "${tmpdir}"
|
|
||||||
|
|
||||||
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
|
|
||||||
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
|
|
||||||
# but is originally taken from:
|
|
||||||
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
|
||||||
#
|
|
||||||
# To update, do the following:
|
|
||||||
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
|
||||||
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
|
||||||
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
|
||||||
#
|
|
||||||
# Due to GCS caching of public objects, it may take time for this to be widely
|
|
||||||
# distributed.
|
|
||||||
|
|
||||||
# Calculate the first ip address in the service range
|
|
||||||
octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
|
|
||||||
((octects[3]+=1))
|
|
||||||
service_ip=$(echo "${octects[*]}" | sed 's/ /./g')
|
|
||||||
|
|
||||||
# Determine appropriete subject alt names
|
|
||||||
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
|
|
||||||
|
|
||||||
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
|
|
||||||
tar xzf easy-rsa.tar.gz > /dev/null
|
|
||||||
cd easy-rsa-master/easyrsa3
|
|
||||||
|
|
||||||
(./easyrsa init-pki > /dev/null 2>&1
|
|
||||||
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
|
|
||||||
./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
|
|
||||||
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
|
|
||||||
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
|
|
||||||
# If there was an error in the subshell, just die.
|
|
||||||
# TODO(roberthbailey): add better error handling here
|
|
||||||
echo "=== Failed to generate certificates: Aborting ==="
|
|
||||||
exit 2
|
|
||||||
}
|
|
||||||
|
|
||||||
mkdir -p "$cert_dir"
|
|
||||||
|
|
||||||
cp -p pki/ca.crt "${cert_dir}/ca.crt"
|
|
||||||
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
|
|
||||||
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
|
|
||||||
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
|
|
||||||
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
|
|
||||||
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
|
|
||||||
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
|
|
||||||
|
|
||||||
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
|
|
||||||
for cert in "${CERTS[@]}"; do
|
|
||||||
chgrp "${cert_group}" "${cert_dir}/${cert}"
|
|
||||||
chmod 660 "${cert_dir}/${cert}"
|
|
||||||
done
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
---
|
|
||||||
dependencies:
|
|
||||||
- { role: etcd }
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
---
|
|
||||||
#- name: Get create ca cert script from Kubernetes
|
|
||||||
# get_url:
|
|
||||||
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
|
|
||||||
# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
|
|
||||||
# force=yes
|
|
||||||
|
|
||||||
- name: certs | install cert generation script
|
|
||||||
copy:
|
|
||||||
src=make-ca-cert.sh
|
|
||||||
dest={{ kube_script_dir }}
|
|
||||||
mode=0500
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
# FIXME This only generates a cert for one master...
|
|
||||||
- name: certs | run cert generation script
|
|
||||||
command:
|
|
||||||
"{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
|
|
||||||
args:
|
|
||||||
creates: "{{ kube_cert_dir }}/server.crt"
|
|
||||||
environment:
|
|
||||||
MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
|
|
||||||
MASTER_NAME: "{{ inventory_hostname }}"
|
|
||||||
DNS_DOMAIN: "{{ dns_domain }}"
|
|
||||||
SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
|
|
||||||
CERT_DIR: "{{ kube_cert_dir }}"
|
|
||||||
CERT_GROUP: "{{ kube_cert_group }}"
|
|
||||||
|
|
||||||
- name: certs | check certificate permissions
|
|
||||||
file:
|
|
||||||
path={{ item }}
|
|
||||||
group={{ kube_cert_group }}
|
|
||||||
owner=kube
|
|
||||||
mode=0440
|
|
||||||
with_items:
|
|
||||||
- "{{ kube_cert_dir }}/ca.crt"
|
|
||||||
- "{{ kube_cert_dir }}/server.crt"
|
|
||||||
- "{{ kube_cert_dir }}/server.key"
|
|
||||||
- "{{ kube_cert_dir }}/kubecfg.crt"
|
|
||||||
- "{{ kube_cert_dir }}/kubecfg.key"
|
|
||||||
- "{{ kube_cert_dir }}/kubelet.crt"
|
|
||||||
- "{{ kube_cert_dir }}/kubelet.key"
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
---
|
|
||||||
- name: tokens | copy the token gen script
|
|
||||||
copy:
|
|
||||||
src=kube-gen-token.sh
|
|
||||||
dest={{ kube_script_dir }}
|
|
||||||
mode=u+x
|
|
||||||
|
|
||||||
- name: tokens | generate tokens for master components
|
|
||||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
|
||||||
environment:
|
|
||||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
|
||||||
with_nested:
|
|
||||||
- [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
|
|
||||||
- "{{ groups['kube-master'][0] }}"
|
|
||||||
register: gentoken
|
|
||||||
changed_when: "'Added' in gentoken.stdout"
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- name: tokens | generate tokens for node components
|
|
||||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
|
||||||
environment:
|
|
||||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
|
||||||
with_nested:
|
|
||||||
- [ 'system:kubelet', 'system:proxy' ]
|
|
||||||
- "{{ groups['kube-node'] }}"
|
|
||||||
register: gentoken
|
|
||||||
changed_when: "'Added' in gentoken.stdout"
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
---
|
|
||||||
- name: define alias command for kubectl all
|
|
||||||
lineinfile:
|
|
||||||
dest=/etc/bash.bashrc
|
|
||||||
line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
|
|
||||||
regexp='^alias kball=.*$'
|
|
||||||
state=present
|
|
||||||
insertafter=EOF
|
|
||||||
create=True
|
|
||||||
|
|
||||||
- name: create kubernetes config directory
|
|
||||||
file: path={{ kube_config_dir }} state=directory
|
|
||||||
|
|
||||||
- name: create kubernetes script directory
|
|
||||||
file: path={{ kube_script_dir }} state=directory
|
|
||||||
|
|
||||||
- name: Make sure manifest directory exists
|
|
||||||
file: path={{ kube_manifest_dir }} state=directory
|
|
||||||
|
|
||||||
- name: write the global config file
|
|
||||||
template:
|
|
||||||
src: config.j2
|
|
||||||
dest: "{{ kube_config_dir }}/config"
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- include: secrets.yml
|
|
||||||
tags:
|
|
||||||
- secrets
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
---
|
|
||||||
- name: certs | create system kube-cert groups
|
|
||||||
group: name={{ kube_cert_group }} state=present system=yes
|
|
||||||
|
|
||||||
- name: create system kube user
|
|
||||||
user:
|
|
||||||
name=kube
|
|
||||||
comment="Kubernetes user"
|
|
||||||
shell=/sbin/nologin
|
|
||||||
state=present
|
|
||||||
system=yes
|
|
||||||
groups={{ kube_cert_group }}
|
|
||||||
|
|
||||||
- name: certs | make sure the certificate directory exits
|
|
||||||
file:
|
|
||||||
path={{ kube_cert_dir }}
|
|
||||||
state=directory
|
|
||||||
mode=o-rwx
|
|
||||||
group={{ kube_cert_group }}
|
|
||||||
|
|
||||||
- name: tokens | make sure the tokens directory exits
|
|
||||||
file:
|
|
||||||
path={{ kube_token_dir }}
|
|
||||||
state=directory
|
|
||||||
mode=o-rwx
|
|
||||||
group={{ kube_cert_group }}
|
|
||||||
|
|
||||||
- include: gen_certs.yml
|
|
||||||
run_once: true
|
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
|
||||||
|
|
||||||
- name: Read back the CA certificate
|
|
||||||
slurp:
|
|
||||||
src: "{{ kube_cert_dir }}/ca.crt"
|
|
||||||
register: ca_cert
|
|
||||||
run_once: true
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: certs | register the CA certificate as a fact for later use
|
|
||||||
set_fact:
|
|
||||||
kube_ca_cert: "{{ ca_cert.content|b64decode }}"
|
|
||||||
|
|
||||||
- name: certs | write CA certificate everywhere
|
|
||||||
copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- include: gen_tokens.yml
|
|
||||||
run_once: true
|
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
###
|
|
||||||
# kubernetes system config
|
|
||||||
#
|
|
||||||
# The following values are used to configure various aspects of all
|
|
||||||
# kubernetes services, including
|
|
||||||
#
|
|
||||||
# kube-apiserver.service
|
|
||||||
# kube-controller-manager.service
|
|
||||||
# kube-scheduler.service
|
|
||||||
# kubelet.service
|
|
||||||
# kube-proxy.service
|
|
||||||
|
|
||||||
# Comma separated list of nodes in the etcd cluster
|
|
||||||
# KUBE_ETCD_SERVERS="--etcd_servers="
|
|
||||||
|
|
||||||
# logging to stderr means we get it in the systemd journal
|
|
||||||
KUBE_LOGTOSTDERR="--logtostderr=true"
|
|
||||||
|
|
||||||
# journal message level, 0 is debug
|
|
||||||
KUBE_LOG_LEVEL="--v=5"
|
|
||||||
|
|
||||||
# Should this cluster be allowed to run privileged docker containers
|
|
||||||
KUBE_ALLOW_PRIV="--allow_privileged=true"
|
|
||||||
|
|
||||||
# How the replication controller, scheduler, and proxy
|
|
||||||
KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
|
|
||||||
2127
roles/kubernetes/master/files/kubectl_bash_completion.sh
Normal file
2127
roles/kubernetes/master/files/kubectl_bash_completion.sh
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,32 +1,14 @@
|
|||||||
---
|
---
|
||||||
- name: restart daemons
|
|
||||||
command: /bin/true
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart apiserver
|
|
||||||
- restart controller-manager
|
|
||||||
- restart scheduler
|
|
||||||
- restart proxy
|
|
||||||
|
|
||||||
- name: reload systemd
|
- name: reload systemd
|
||||||
command: systemctl daemon-reload
|
command: systemctl daemon-reload
|
||||||
|
|
||||||
- name: restart apiserver
|
- name: restart systemd-kubelet
|
||||||
service:
|
command: /bin/true
|
||||||
name: kube-apiserver
|
notify:
|
||||||
state: restarted
|
- reload systemd
|
||||||
|
- restart kubelet
|
||||||
|
|
||||||
- name: restart controller-manager
|
- name: restart kubelet
|
||||||
service:
|
service:
|
||||||
name: kube-controller-manager
|
name: kubelet
|
||||||
state: restarted
|
|
||||||
|
|
||||||
- name: restart scheduler
|
|
||||||
service:
|
|
||||||
name: kube-scheduler
|
|
||||||
state: restarted
|
|
||||||
|
|
||||||
- name: restart proxy
|
|
||||||
service:
|
|
||||||
name: kube-proxy
|
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
---
|
---
|
||||||
dependencies:
|
dependencies:
|
||||||
- { role: kubernetes/common }
|
- { role: etcd }
|
||||||
|
- { role: kubernetes/node }
|
||||||
|
|||||||
@@ -1,87 +0,0 @@
|
|||||||
---
|
|
||||||
- name: get the node token values from token files
|
|
||||||
slurp:
|
|
||||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
|
||||||
with_items:
|
|
||||||
- "system:controller_manager"
|
|
||||||
- "system:scheduler"
|
|
||||||
- "system:kubectl"
|
|
||||||
- "system:proxy"
|
|
||||||
register: tokens
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Set token facts
|
|
||||||
set_fact:
|
|
||||||
controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
|
|
||||||
scheduler_token: "{{ tokens.results[1].content|b64decode }}"
|
|
||||||
kubectl_token: "{{ tokens.results[2].content|b64decode }}"
|
|
||||||
proxy_token: "{{ tokens.results[3].content|b64decode }}"
|
|
||||||
|
|
||||||
- name: write the config files for api server
|
|
||||||
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- name: write config file for controller-manager
|
|
||||||
template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
|
|
||||||
notify:
|
|
||||||
- restart controller-manager
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for controller-manager
|
|
||||||
template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
|
|
||||||
notify:
|
|
||||||
- restart controller-manager
|
|
||||||
|
|
||||||
- name: write the config file for scheduler
|
|
||||||
template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
|
|
||||||
notify:
|
|
||||||
- restart scheduler
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for scheduler
|
|
||||||
template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
|
|
||||||
notify:
|
|
||||||
- restart scheduler
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for kubectl
|
|
||||||
template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
|
|
||||||
|
|
||||||
- name: write the config files for proxy
|
|
||||||
template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for proxy
|
|
||||||
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
|
|
||||||
|
|
||||||
- name: populate users for basic auth in API
|
|
||||||
lineinfile:
|
|
||||||
dest: "{{ kube_users_dir }}/known_users.csv"
|
|
||||||
create: yes
|
|
||||||
line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
|
|
||||||
with_dict: "{{ kube_users }}"
|
|
||||||
notify:
|
|
||||||
- restart apiserver
|
|
||||||
|
|
||||||
- name: Enable apiserver
|
|
||||||
service:
|
|
||||||
name: kube-apiserver
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
|
|
||||||
- name: Enable controller-manager
|
|
||||||
service:
|
|
||||||
name: kube-controller-manager
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
|
|
||||||
- name: Enable scheduler
|
|
||||||
service:
|
|
||||||
name: kube-scheduler
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
|
|
||||||
- name: Enable kube-proxy
|
|
||||||
service:
|
|
||||||
name: kube-proxy
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Write kube-apiserver systemd init file
|
|
||||||
template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
|
|
||||||
notify: restart daemons
|
|
||||||
|
|
||||||
- name: Write kube-controller-manager systemd init file
|
|
||||||
template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
|
|
||||||
notify: restart daemons
|
|
||||||
|
|
||||||
- name: Write kube-scheduler systemd init file
|
|
||||||
template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
|
|
||||||
notify: restart daemons
|
|
||||||
|
|
||||||
- name: Write kube-proxy systemd init file
|
|
||||||
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
|
|
||||||
notify: restart daemons
|
|
||||||
|
|
||||||
- name: Install kubernetes binaries
|
|
||||||
copy:
|
|
||||||
src={{ local_release_dir }}/kubernetes/bin/{{ item }}
|
|
||||||
dest={{ bin_dir }}
|
|
||||||
owner=kube
|
|
||||||
mode=u+x
|
|
||||||
with_items:
|
|
||||||
- kube-apiserver
|
|
||||||
- kube-controller-manager
|
|
||||||
- kube-scheduler
|
|
||||||
- kube-proxy
|
|
||||||
- kubectl
|
|
||||||
notify:
|
|
||||||
- restart daemons
|
|
||||||
|
|
||||||
- name: Allow apiserver to bind on both secure and insecure ports
|
|
||||||
shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
|
|
||||||
@@ -1,3 +1,82 @@
|
|||||||
---
|
---
|
||||||
- include: install.yml
|
- name: Copy kubectl bash completion
|
||||||
- include: config.yml
|
copy:
|
||||||
|
src: kubectl_bash_completion.sh
|
||||||
|
dest: /etc/bash_completion.d/kubectl.sh
|
||||||
|
|
||||||
|
- name: Install kubectl binary
|
||||||
|
synchronize:
|
||||||
|
src: "{{ local_release_dir }}/kubernetes/bin/kubectl"
|
||||||
|
dest: "{{ bin_dir }}/kubectl"
|
||||||
|
archive: no
|
||||||
|
checksum: yes
|
||||||
|
times: yes
|
||||||
|
delegate_to: "{{ groups['downloader'][0] }}"
|
||||||
|
|
||||||
|
- name: Perms kubectl binary
|
||||||
|
file: path={{ bin_dir }}/kubectl owner=kube mode=0755 state=file
|
||||||
|
|
||||||
|
- name: populate users for basic auth in API
|
||||||
|
lineinfile:
|
||||||
|
dest: "{{ kube_users_dir }}/known_users.csv"
|
||||||
|
create: yes
|
||||||
|
line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
|
||||||
|
backup: yes
|
||||||
|
with_dict: "{{ kube_users }}"
|
||||||
|
|
||||||
|
# Sync masters
|
||||||
|
- name: synchronize auth directories for masters
|
||||||
|
synchronize:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: "{{ kube_config_dir }}"
|
||||||
|
recursive: yes
|
||||||
|
delete: yes
|
||||||
|
rsync_opts: [ '--one-file-system']
|
||||||
|
set_remote_user: false
|
||||||
|
with_items:
|
||||||
|
- "{{ kube_token_dir }}"
|
||||||
|
- "{{ kube_cert_dir }}"
|
||||||
|
- "{{ kube_users_dir }}"
|
||||||
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
when: inventory_hostname != "{{ groups['kube-master'][0] }}"
|
||||||
|
|
||||||
|
# Write manifests
|
||||||
|
- name: Write kube-apiserver manifest
|
||||||
|
template:
|
||||||
|
src: manifests/kube-apiserver.manifest.j2
|
||||||
|
dest: "{{ kube_manifest_dir }}/kube-apisever.manifest"
|
||||||
|
notify:
|
||||||
|
- restart kubelet
|
||||||
|
|
||||||
|
- meta: flush_handlers
|
||||||
|
|
||||||
|
- name: wait for the apiserver to be running (pulling image and running container)
|
||||||
|
wait_for:
|
||||||
|
port: "{{kube_apiserver_insecure_port}}"
|
||||||
|
delay: 10
|
||||||
|
timeout: 60
|
||||||
|
|
||||||
|
- name: Create 'kube-system' namespace
|
||||||
|
uri:
|
||||||
|
url: http://127.0.0.1:{{ kube_apiserver_insecure_port }}/api/v1/namespaces
|
||||||
|
method: POST
|
||||||
|
body: '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}'
|
||||||
|
status_code: 201,409
|
||||||
|
body_format: json
|
||||||
|
run_once: yes
|
||||||
|
when: inventory_hostname == groups['kube-master'][0]
|
||||||
|
|
||||||
|
- name: Write kube-controller-manager manifest
|
||||||
|
template:
|
||||||
|
src: manifests/kube-controller-manager.manifest.j2
|
||||||
|
dest: "{{ kube_config_dir }}/kube-controller-manager.manifest"
|
||||||
|
|
||||||
|
- name: Write kube-scheduler manifest
|
||||||
|
template:
|
||||||
|
src: manifests/kube-scheduler.manifest.j2
|
||||||
|
dest: "{{ kube_config_dir }}/kube-scheduler.manifest"
|
||||||
|
|
||||||
|
- name: Write podmaster manifest
|
||||||
|
template:
|
||||||
|
src: manifests/kube-podmaster.manifest.j2
|
||||||
|
dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest"
|
||||||
|
|||||||
@@ -1,25 +0,0 @@
|
|||||||
###
|
|
||||||
# kubernetes system config
|
|
||||||
#
|
|
||||||
# The following values are used to configure the kube-apiserver
|
|
||||||
#
|
|
||||||
|
|
||||||
# The address on the local server to listen to.
|
|
||||||
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
|
|
||||||
|
|
||||||
# The port on the local server to listen on.
|
|
||||||
KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
|
|
||||||
|
|
||||||
# KUBELET_PORT="--kubelet_port=10250"
|
|
||||||
|
|
||||||
# Address range to use for services
|
|
||||||
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
|
|
||||||
|
|
||||||
# Location of the etcd cluster
|
|
||||||
KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
|
|
||||||
|
|
||||||
# default admission control policies
|
|
||||||
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
|
|
||||||
|
|
||||||
# Add you own!
|
|
||||||
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
###
|
|
||||||
# The following values are used to configure the kubernetes controller-manager
|
|
||||||
|
|
||||||
# defaults from config and apiserver should be adequate
|
|
||||||
|
|
||||||
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Config
|
|
||||||
current-context: controller-manager-to-{{ cluster_name }}
|
|
||||||
preferences: {}
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
|
||||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
|
||||||
name: {{ cluster_name }}
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: {{ cluster_name }}
|
|
||||||
user: controller-manager
|
|
||||||
name: controller-manager-to-{{ cluster_name }}
|
|
||||||
users:
|
|
||||||
- name: controller-manager
|
|
||||||
user:
|
|
||||||
token: {{ controller_manager_token }}
|
|
||||||
@@ -4,8 +4,8 @@ current-context: kubectl-to-{{ cluster_name }}
|
|||||||
preferences: {}
|
preferences: {}
|
||||||
clusters:
|
clusters:
|
||||||
- cluster:
|
- cluster:
|
||||||
certificate-authority-data: {{ kube_ca_cert|b64encode }}
|
certificate-authority-data: {{ kube_node_cert|b64encode }}
|
||||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
|
||||||
name: {{ cluster_name }}
|
name: {{ cluster_name }}
|
||||||
contexts:
|
contexts:
|
||||||
- context:
|
- context:
|
||||||
@@ -0,0 +1,52 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-apiserver
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: kube-apiserver
|
||||||
|
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
|
||||||
|
command:
|
||||||
|
- /hyperkube
|
||||||
|
- apiserver
|
||||||
|
- --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
|
||||||
|
|
||||||
|
- --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
|
||||||
|
- --service-cluster-ip-range={{ kube_service_addresses }}
|
||||||
|
- --client-ca-file={{ kube_cert_dir }}/ca.pem
|
||||||
|
- --basic-auth-file={{ kube_users_dir }}/known_users.csv
|
||||||
|
- --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
|
||||||
|
- --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
|
||||||
|
- --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
|
||||||
|
- --secure-port={{ kube_apiserver_port }}
|
||||||
|
- --insecure-port={{ kube_apiserver_insecure_port }}
|
||||||
|
{% if kube_api_runtime_config is defined %}
|
||||||
|
{% for conf in kube_api_runtime_config %}
|
||||||
|
- --runtime-config={{ conf }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
- --token-auth-file={{ kube_token_dir }}/known_tokens.csv
|
||||||
|
- --v={{ kube_log_level | default('2') }}
|
||||||
|
- --allow-privileged=true
|
||||||
|
ports:
|
||||||
|
- containerPort: {{ kube_apiserver_port }}
|
||||||
|
hostPort: {{ kube_apiserver_port }}
|
||||||
|
name: https
|
||||||
|
- containerPort: {{ kube_apiserver_insecure_port }}
|
||||||
|
hostPort: {{ kube_apiserver_insecure_port }}
|
||||||
|
name: local
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: {{ kube_config_dir }}
|
||||||
|
name: kubernetes-config
|
||||||
|
readOnly: true
|
||||||
|
- mountPath: /etc/ssl/certs
|
||||||
|
name: ssl-certs-host
|
||||||
|
readOnly: true
|
||||||
|
volumes:
|
||||||
|
- hostPath:
|
||||||
|
path: {{ kube_config_dir }}
|
||||||
|
name: kubernetes-config
|
||||||
|
- hostPath:
|
||||||
|
path: /usr/share/ca-certificates
|
||||||
|
name: ssl-certs-host
|
||||||
@@ -0,0 +1,38 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-controller-manager
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: kube-controller-manager
|
||||||
|
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
|
||||||
|
command:
|
||||||
|
- /hyperkube
|
||||||
|
- controller-manager
|
||||||
|
- --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
|
||||||
|
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
|
||||||
|
- --root-ca-file={{ kube_cert_dir }}/ca.pem
|
||||||
|
- --v={{ kube_log_level | default('2') }}
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
host: 127.0.0.1
|
||||||
|
path: /healthz
|
||||||
|
port: 10252
|
||||||
|
initialDelaySeconds: 15
|
||||||
|
timeoutSeconds: 1
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: {{ kube_cert_dir }}
|
||||||
|
name: ssl-certs-kubernetes
|
||||||
|
readOnly: true
|
||||||
|
- mountPath: /etc/ssl/certs
|
||||||
|
name: ssl-certs-host
|
||||||
|
readOnly: true
|
||||||
|
volumes:
|
||||||
|
- hostPath:
|
||||||
|
path: {{ kube_cert_dir }}
|
||||||
|
name: ssl-certs-kubernetes
|
||||||
|
- hostPath:
|
||||||
|
path: /usr/share/ca-certificates
|
||||||
|
name: ssl-certs-host
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-podmaster
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: scheduler-elector
|
||||||
|
image: gcr.io/google_containers/podmaster:1.1
|
||||||
|
command:
|
||||||
|
- /podmaster
|
||||||
|
- --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
|
||||||
|
|
||||||
|
- --key=scheduler
|
||||||
|
- --source-file={{ kube_config_dir}}/kube-scheduler.manifest
|
||||||
|
- --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: {{ kube_config_dir }}
|
||||||
|
name: manifest-src
|
||||||
|
readOnly: true
|
||||||
|
- mountPath: {{ kube_manifest_dir }}
|
||||||
|
name: manifest-dst
|
||||||
|
- name: controller-manager-elector
|
||||||
|
image: gcr.io/google_containers/podmaster:1.1
|
||||||
|
command:
|
||||||
|
- /podmaster
|
||||||
|
- --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
|
||||||
|
|
||||||
|
- --key=controller
|
||||||
|
- --source-file={{ kube_config_dir }}/kube-controller-manager.manifest
|
||||||
|
- --dest-file={{ kube_manifest_dir }}/kube-controller-manager.manifest
|
||||||
|
terminationMessagePath: /dev/termination-log
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: {{ kube_config_dir }}
|
||||||
|
name: manifest-src
|
||||||
|
readOnly: true
|
||||||
|
- mountPath: {{ kube_manifest_dir }}
|
||||||
|
name: manifest-dst
|
||||||
|
volumes:
|
||||||
|
- hostPath:
|
||||||
|
path: {{ kube_config_dir }}
|
||||||
|
name: manifest-src
|
||||||
|
- hostPath:
|
||||||
|
path: {{ kube_manifest_dir }}
|
||||||
|
name: manifest-dst
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: kube-scheduler
|
||||||
|
namespace: kube-system
|
||||||
|
spec:
|
||||||
|
hostNetwork: true
|
||||||
|
containers:
|
||||||
|
- name: kube-scheduler
|
||||||
|
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
|
||||||
|
command:
|
||||||
|
- /hyperkube
|
||||||
|
- scheduler
|
||||||
|
- --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
|
||||||
|
- --v={{ kube_log_level | default('2') }}
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
host: 127.0.0.1
|
||||||
|
path: /healthz
|
||||||
|
port: 10251
|
||||||
|
initialDelaySeconds: 15
|
||||||
|
timeoutSeconds: 1
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
###
|
|
||||||
# kubernetes proxy config
|
|
||||||
|
|
||||||
# default config should be adequate
|
|
||||||
|
|
||||||
# Add your own!
|
|
||||||
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Config
|
|
||||||
current-context: proxy-to-{{ cluster_name }}
|
|
||||||
preferences: {}
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: {{ cluster_name }}
|
|
||||||
user: proxy
|
|
||||||
name: proxy-to-{{ cluster_name }}
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
|
||||||
server: http://{{ groups['kube-master'][0] }}:8080
|
|
||||||
name: {{ cluster_name }}
|
|
||||||
users:
|
|
||||||
- name: proxy
|
|
||||||
user:
|
|
||||||
token: {{ proxy_token }}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
###
|
|
||||||
# kubernetes scheduler config
|
|
||||||
|
|
||||||
# default config should be adequate
|
|
||||||
|
|
||||||
# Add your own!
|
|
||||||
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Config
|
|
||||||
current-context: scheduler-to-{{ cluster_name }}
|
|
||||||
preferences: {}
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
|
||||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
|
||||||
name: {{ cluster_name }}
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: {{ cluster_name }}
|
|
||||||
user: scheduler
|
|
||||||
name: scheduler-to-{{ cluster_name }}
|
|
||||||
users:
|
|
||||||
- name: scheduler
|
|
||||||
user:
|
|
||||||
token: {{ scheduler_token }}
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Kubernetes API Server
|
|
||||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
|
||||||
Requires=etcd2.service
|
|
||||||
After=etcd2.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
EnvironmentFile=/etc/network-environment
|
|
||||||
EnvironmentFile=-/etc/kubernetes/config
|
|
||||||
EnvironmentFile=-/etc/kubernetes/apiserver
|
|
||||||
User=kube
|
|
||||||
ExecStart={{ bin_dir }}/kube-apiserver \
|
|
||||||
$KUBE_LOGTOSTDERR \
|
|
||||||
$KUBE_LOG_LEVEL \
|
|
||||||
$KUBE_ETCD_SERVERS \
|
|
||||||
$KUBE_API_ADDRESS \
|
|
||||||
$KUBE_API_PORT \
|
|
||||||
$KUBELET_PORT \
|
|
||||||
$KUBE_ALLOW_PRIV \
|
|
||||||
$KUBE_SERVICE_ADDRESSES \
|
|
||||||
$KUBE_ADMISSION_CONTROL \
|
|
||||||
$KUBE_API_ARGS
|
|
||||||
Restart=on-failure
|
|
||||||
Type=notify
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Kubernetes Controller Manager
|
|
||||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
|
||||||
Requires=etcd2.service
|
|
||||||
After=etcd2.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
EnvironmentFile=-/etc/kubernetes/config
|
|
||||||
EnvironmentFile=-/etc/kubernetes/controller-manager
|
|
||||||
User=kube
|
|
||||||
ExecStart={{ bin_dir }}/kube-controller-manager \
|
|
||||||
$KUBE_LOGTOSTDERR \
|
|
||||||
$KUBE_LOG_LEVEL \
|
|
||||||
$KUBE_MASTER \
|
|
||||||
$KUBE_CONTROLLER_MANAGER_ARGS
|
|
||||||
Restart=on-failure
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Kubernetes Kube-Proxy Server
|
|
||||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
|
||||||
{% if overlay_network_plugin|default('') %}
|
|
||||||
After=docker.service calico-node.service
|
|
||||||
{% else %}
|
|
||||||
After=docker.service
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
EnvironmentFile=/etc/network-environment
|
|
||||||
ExecStart={{ bin_dir }}/kube-proxy \
|
|
||||||
$KUBE_LOGTOSTDERR \
|
|
||||||
$KUBE_LOG_LEVEL \
|
|
||||||
$KUBE_MASTER \
|
|
||||||
$KUBE_PROXY_ARGS
|
|
||||||
Restart=on-failure
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Kubernetes Scheduler Plugin
|
|
||||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
|
||||||
Requires=etcd2.service
|
|
||||||
After=etcd2.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
EnvironmentFile=-/etc/kubernetes/config
|
|
||||||
EnvironmentFile=-/etc/kubernetes/scheduler
|
|
||||||
User=kube
|
|
||||||
ExecStart={{ bin_dir }}/kube-scheduler \
|
|
||||||
$KUBE_LOGTOSTDERR \
|
|
||||||
$KUBE_LOG_LEVEL \
|
|
||||||
$KUBE_MASTER \
|
|
||||||
$KUBE_SCHEDULER_ARGS
|
|
||||||
Restart=on-failure
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -11,11 +11,8 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
|||||||
# look in here. Don't do it.
|
# look in here. Don't do it.
|
||||||
kube_config_dir: /etc/kubernetes
|
kube_config_dir: /etc/kubernetes
|
||||||
|
|
||||||
# The port the API Server will be listening on.
|
|
||||||
kube_master_port: 443
|
|
||||||
|
|
||||||
# This is where all the cert scripts and certs will be located
|
# This is where all the cert scripts and certs will be located
|
||||||
kube_cert_dir: "{{ kube_config_dir }}/certs"
|
kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
||||||
|
|
||||||
# This is where all of the bearer tokens will be stored
|
# This is where all of the bearer tokens will be stored
|
||||||
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||||
@@ -33,9 +30,20 @@ kube_cert_group: kube-cert
|
|||||||
|
|
||||||
dns_domain: "{{ cluster_name }}"
|
dns_domain: "{{ cluster_name }}"
|
||||||
|
|
||||||
|
kube_proxy_mode: userspace
|
||||||
|
|
||||||
|
# Temporary image, waiting for official google release
|
||||||
|
# hyperkube_image_repo: gcr.io/google_containers/hyperkube
|
||||||
|
hyperkube_image_repo: quay.io/smana/hyperkube
|
||||||
|
hyperkube_image_tag: v1.1.3
|
||||||
|
|
||||||
# IP address of the DNS server.
|
# IP address of the DNS server.
|
||||||
# Kubernetes will create a pod with several containers, serving as the DNS
|
# Kubernetes will create a pod with several containers, serving as the DNS
|
||||||
# server and expose it under this IP address. The IP address must be from
|
# server and expose it under this IP address. The IP address must be from
|
||||||
# the range specified as kube_service_addresses. This magic will actually
|
# the range specified as kube_service_addresses. This magic will actually
|
||||||
# pick the 10th ip address in the kube_service_addresses range and use that.
|
# pick the 10th ip address in the kube_service_addresses range and use that.
|
||||||
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
|
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
|
||||||
|
|
||||||
|
kube_api_runtime_config:
|
||||||
|
- extensions/v1beta1/daemonsets=true
|
||||||
|
- extensions/v1beta1/deployments=true
|
||||||
@@ -19,7 +19,10 @@ token_file="${token_dir}/known_tokens.csv"
|
|||||||
|
|
||||||
create_accounts=($@)
|
create_accounts=($@)
|
||||||
|
|
||||||
touch "${token_file}"
|
if [ ! -e "${token_file}" ]; then
|
||||||
|
touch "${token_file}"
|
||||||
|
fi
|
||||||
|
|
||||||
for account in "${create_accounts[@]}"; do
|
for account in "${create_accounts[@]}"; do
|
||||||
if grep ",${account}," "${token_file}" ; then
|
if grep ",${account}," "${token_file}" ; then
|
||||||
continue
|
continue
|
||||||
107
roles/kubernetes/node/files/make-ssl.sh
Normal file
107
roles/kubernetes/node/files/make-ssl.sh
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Author: skahlouc@skahlouc-laptop
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
usage()
|
||||||
|
{
|
||||||
|
cat << EOF
|
||||||
|
Create self signed certificates
|
||||||
|
|
||||||
|
Usage : $(basename $0) -f <config> [-c <cloud_provider>] [-d <ssldir>] [-g <ssl_group>]
|
||||||
|
-h | --help : Show this message
|
||||||
|
-f | --config : Openssl configuration file
|
||||||
|
-c | --cloud : Cloud provider (GCE, AWS or AZURE)
|
||||||
|
-d | --ssldir : Directory where the certificates will be installed
|
||||||
|
-g | --sslgrp : Group of the certificates
|
||||||
|
|
||||||
|
ex :
|
||||||
|
$(basename $0) -f openssl.conf -c GCE -d /srv/ssl -g kube
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Options parsing
|
||||||
|
while (($#)); do
|
||||||
|
case "$1" in
|
||||||
|
-h | --help) usage; exit 0;;
|
||||||
|
-f | --config) CONFIG=${2}; shift 2;;
|
||||||
|
-c | --cloud) CLOUD=${2}; shift 2;;
|
||||||
|
-d | --ssldir) SSLDIR="${2}"; shift 2;;
|
||||||
|
-g | --group) SSLGRP="${2}"; shift 2;;
|
||||||
|
*)
|
||||||
|
usage
|
||||||
|
echo "ERROR : Unknown option"
|
||||||
|
exit 3
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ -z ${CONFIG} ]; then
|
||||||
|
echo "ERROR: the openssl configuration file is missing. option -f"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z ${SSLDIR} ]; then
|
||||||
|
SSLDIR="/etc/kubernetes/certs"
|
||||||
|
fi
|
||||||
|
if [ -z ${SSLGRP} ]; then
|
||||||
|
SSLGRP="kube-cert"
|
||||||
|
fi
|
||||||
|
|
||||||
|
#echo "config=$CONFIG, cloud=$CLOUD, certdir=$SSLDIR, certgroup=$SSLGRP"
|
||||||
|
|
||||||
|
SUPPORTED_CLOUDS="GCE AWS AZURE"
|
||||||
|
|
||||||
|
# TODO: Add support for discovery on other providers?
|
||||||
|
if [ "${CLOUD}" == "GCE" ]; then
|
||||||
|
CLOUD_IP=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${CLOUD}" == "AWS" ]; then
|
||||||
|
CLOUD_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${CLOUD}" == "AZURE" ]; then
|
||||||
|
CLOUD_IP=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
|
||||||
|
fi
|
||||||
|
|
||||||
|
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
|
||||||
|
trap 'rm -rf "${tmpdir}"' EXIT
|
||||||
|
cd "${tmpdir}"
|
||||||
|
|
||||||
|
mkdir -p "${SSLDIR}"
|
||||||
|
|
||||||
|
# Root CA
|
||||||
|
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
|
||||||
|
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
|
||||||
|
|
||||||
|
# Apiserver
|
||||||
|
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
|
||||||
|
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
|
||||||
|
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
|
||||||
|
|
||||||
|
# Nodes and Admin
|
||||||
|
for i in node admin; do
|
||||||
|
openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
|
||||||
|
openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
|
||||||
|
openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
|
||||||
|
done
|
||||||
|
|
||||||
|
# Install certs
|
||||||
|
mv *.pem ${SSLDIR}/
|
||||||
|
chgrp ${SSLGRP} ${SSLDIR}/*
|
||||||
|
chmod 600 ${SSLDIR}/*-key.pem
|
||||||
|
chown root:root ${SSLDIR}/*-key.pem
|
||||||
@@ -1,19 +1,14 @@
|
|||||||
---
|
---
|
||||||
- name: restart daemons
|
- name: reload systemd
|
||||||
|
command: systemctl daemon-reload
|
||||||
|
|
||||||
|
- name: restart systemd-kubelet
|
||||||
command: /bin/true
|
command: /bin/true
|
||||||
notify:
|
notify:
|
||||||
|
- reload systemd
|
||||||
- restart kubelet
|
- restart kubelet
|
||||||
- restart proxy
|
|
||||||
|
|
||||||
- name: restart kubelet
|
- name: restart kubelet
|
||||||
service:
|
service:
|
||||||
name: kubelet
|
name: kubelet
|
||||||
state: restarted
|
state: restarted
|
||||||
|
|
||||||
- name: restart proxy
|
|
||||||
service:
|
|
||||||
name: kube-proxy
|
|
||||||
state: restarted
|
|
||||||
|
|
||||||
- name: reload systemd
|
|
||||||
command: systemctl daemon-reload
|
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
---
|
|
||||||
dependencies:
|
|
||||||
- { role: kubernetes/common }
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Get the node token values
|
|
||||||
slurp:
|
|
||||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
|
||||||
with_items:
|
|
||||||
- "system:kubelet"
|
|
||||||
- "system:proxy"
|
|
||||||
register: tokens
|
|
||||||
run_once: true
|
|
||||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
|
||||||
|
|
||||||
- name: Set token facts
|
|
||||||
set_fact:
|
|
||||||
kubelet_token: "{{ tokens.results[0].content|b64decode }}"
|
|
||||||
proxy_token: "{{ tokens.results[1].content|b64decode }}"
|
|
||||||
|
|
||||||
- name: Create kubelet environment vars dir
|
|
||||||
file: path=/etc/systemd/system/kubelet.service.d state=directory
|
|
||||||
|
|
||||||
- name: Write kubelet config file
|
|
||||||
template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart kubelet
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for kubelet
|
|
||||||
template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
|
|
||||||
notify:
|
|
||||||
- restart kubelet
|
|
||||||
|
|
||||||
- name: Create proxy environment vars dir
|
|
||||||
file: path=/etc/systemd/system/kube-proxy.service.d state=directory
|
|
||||||
|
|
||||||
- name: Write proxy config file
|
|
||||||
template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
|
|
||||||
notify:
|
|
||||||
- reload systemd
|
|
||||||
- restart proxy
|
|
||||||
|
|
||||||
- name: write the kubecfg (auth) file for kube-proxy
|
|
||||||
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
|
|
||||||
notify:
|
|
||||||
- restart proxy
|
|
||||||
|
|
||||||
- name: Enable kubelet
|
|
||||||
service:
|
|
||||||
name: kubelet
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
|
|
||||||
- name: Enable proxy
|
|
||||||
service:
|
|
||||||
name: kube-proxy
|
|
||||||
enabled: yes
|
|
||||||
state: started
|
|
||||||
28
roles/kubernetes/node/tasks/gen_certs.yml
Normal file
28
roles/kubernetes/node/tasks/gen_certs.yml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
- name: certs | install cert generation script
|
||||||
|
copy:
|
||||||
|
src=make-ssl.sh
|
||||||
|
dest={{ kube_script_dir }}
|
||||||
|
mode=0500
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: certs | write openssl config
|
||||||
|
template:
|
||||||
|
src: "openssl.conf.j2"
|
||||||
|
dest: "{{ kube_config_dir }}/.openssl.conf"
|
||||||
|
|
||||||
|
- name: certs | run cert generation script
|
||||||
|
shell: >
|
||||||
|
{{ kube_script_dir }}/make-ssl.sh
|
||||||
|
-f {{ kube_config_dir }}/.openssl.conf
|
||||||
|
-g {{ kube_cert_group }}
|
||||||
|
-d {{ kube_cert_dir }}
|
||||||
|
args:
|
||||||
|
creates: "{{ kube_cert_dir }}/apiserver.pem"
|
||||||
|
|
||||||
|
- name: certs | check certificate permissions
|
||||||
|
file:
|
||||||
|
path={{ kube_cert_dir }}
|
||||||
|
group={{ kube_cert_group }}
|
||||||
|
owner=kube
|
||||||
|
recurse=yes
|
||||||
48
roles/kubernetes/node/tasks/gen_tokens.yml
Normal file
48
roles/kubernetes/node/tasks/gen_tokens.yml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
---
|
||||||
|
- name: tokens | copy the token gen script
|
||||||
|
copy:
|
||||||
|
src=kube-gen-token.sh
|
||||||
|
dest={{ kube_script_dir }}
|
||||||
|
mode=u+x
|
||||||
|
when: inventory_hostname == groups['kube-master'][0]
|
||||||
|
|
||||||
|
- name: tokens | generate tokens for master components
|
||||||
|
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||||
|
environment:
|
||||||
|
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||||
|
with_nested:
|
||||||
|
- [ "system:kubectl" ]
|
||||||
|
- "{{ groups['kube-master'] }}"
|
||||||
|
register: gentoken
|
||||||
|
changed_when: "'Added' in gentoken.stdout"
|
||||||
|
when: inventory_hostname == groups['kube-master'][0]
|
||||||
|
|
||||||
|
- name: tokens | generate tokens for node components
|
||||||
|
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||||
|
environment:
|
||||||
|
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||||
|
with_nested:
|
||||||
|
- [ 'system:kubelet' ]
|
||||||
|
- "{{ groups['kube-node'] }}"
|
||||||
|
register: gentoken
|
||||||
|
changed_when: "'Added' in gentoken.stdout"
|
||||||
|
when: inventory_hostname == groups['kube-master'][0]
|
||||||
|
|
||||||
|
- name: tokens | generate tokens for calico
|
||||||
|
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||||
|
environment:
|
||||||
|
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||||
|
with_nested:
|
||||||
|
- [ "system:calico" ]
|
||||||
|
- "{{ groups['k8s-cluster'] }}"
|
||||||
|
register: gentoken
|
||||||
|
changed_when: "'Added' in gentoken.stdout"
|
||||||
|
when: kube_network_plugin == "calico"
|
||||||
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
|
||||||
|
- name: tokens | get the calico token values
|
||||||
|
slurp:
|
||||||
|
src: "{{ kube_token_dir }}/system:calico-{{ inventory_hostname }}.token"
|
||||||
|
register: calico_token
|
||||||
|
when: kube_network_plugin == "calico"
|
||||||
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
---
# NOTE(review): these debug tasks look like leftover troubleshooting output
# for init-system detection — consider removing them once confirmed unused.
- debug:
    msg: "{{ init_system == 'systemd' }}"
- debug:
    msg: "{{ init_system }}"

- name: install | Write kubelet systemd init file
  template:
    src: kubelet.service.j2
    dest: /etc/systemd/system/kubelet.service
    backup: yes
  when: init_system == "systemd"
  notify: restart systemd-kubelet

- name: install | Write kubelet initd script
  template:
    src: deb-kubelet.initd.j2
    dest: /etc/init.d/kubelet
    owner: root
    # quoted octal string — a bare 755 would be read as decimal
    mode: "0755"
    backup: yes
  when: init_system == "sysvinit" and ansible_os_family == "Debian"
  notify: restart kubelet

- name: install | Write kubelet initd script
  template:
    src: rh-kubelet.initd.j2
    dest: /etc/init.d/kubelet
    owner: root
    mode: "0755"
    backup: yes
  when: init_system == "sysvinit" and ansible_os_family == "RedHat"
  notify: restart kubelet

# The binary was downloaded on the downloader host; rsync it out from there.
- name: install | Install kubelet binary
  synchronize:
    src: "{{ local_release_dir }}/kubernetes/bin/kubelet"
    dest: "{{ bin_dir }}/kubelet"
    times: yes
    archive: no
  delegate_to: "{{ groups['downloader'][0] }}"
  notify:
    - restart kubelet

- name: install | Perms kubelet binary
  file:
    path: "{{ bin_dir }}/kubelet"
    owner: kube
    mode: "0755"
    state: file

- name: install | Calico-plugin | Directory
  file:
    path: /usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/
    state: directory
  when: kube_network_plugin == "calico"

- name: install | Calico-plugin | Binary
  synchronize:
    src: "{{ local_release_dir }}/calico/bin/calico"
    dest: /usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico
    times: yes
    archive: no
  delegate_to: "{{ groups['downloader'][0] }}"
  when: kube_network_plugin == "calico"
  notify: restart kubelet

- name: install | Perms calico plugin binary
  file:
    path: /usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico
    owner: kube
    mode: "0755"
    state: file
|||||||
@@ -1,4 +1,49 @@
|
|||||||
---
- name: create kubernetes config directory
  file:
    path: "{{ kube_config_dir }}"
    state: directory

- name: create kubernetes script directory
  file:
    path: "{{ kube_script_dir }}"
    state: directory

- name: Make sure manifest directory exists
  file:
    path: "{{ kube_manifest_dir }}"
    state: directory

- name: certs | create system kube-cert groups
  group:
    name: "{{ kube_cert_group }}"
    state: present
    system: yes

- name: create system kube user
  user:
    name: kube
    comment: "Kubernetes user"
    shell: /sbin/nologin
    state: present
    system: yes
    groups: "{{ kube_cert_group }}"

- include: secrets.yml
  tags:
    - secrets
||||||
- include: install.yml
|
- include: install.yml
|
||||||
- include: config.yml
|
|
||||||
- include: temp_workaround.yml
|
- name: Write kubelet config file
  template:
    src: kubelet.j2
    dest: "{{ kube_config_dir }}/kubelet"
    backup: yes
  notify:
    - restart kubelet

- name: write the kubecfg (auth) file for kubelet
  template:
    src: node-kubeconfig.yaml.j2
    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
    backup: yes
  notify:
    - restart kubelet

# kube-proxy runs as a static pod picked up by the kubelet from the
# manifest directory, so no service/notify is needed here.
- name: Write proxy manifest
  template:
    src: manifests/kube-proxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started
||||||
|
|||||||
52
roles/kubernetes/node/tasks/secrets.yml
Normal file
52
roles/kubernetes/node/tasks/secrets.yml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
---
- name: certs | make sure the certificate directory exists
  file:
    path: "{{ kube_cert_dir }}"
    state: directory
    # symbolic mode: strip all permissions from "other"
    mode: o-rwx
    group: "{{ kube_cert_group }}"

- name: tokens | make sure the tokens directory exists
  file:
    path: "{{ kube_token_dir }}"
    state: directory
    mode: o-rwx
    group: "{{ kube_cert_group }}"

# Certificates are generated once, on the first master only.
- include: gen_certs.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

- include: gen_tokens.yml

# Sync certs between nodes: give the connecting user on the first master an
# ssh keypair, authorize it on every node, then rsync the certs out.
- name: certs | generate ssh key for the sync user on the first master
  user:
    name: "{{ ansible_user_id }}"
    generate_ssh_key: yes
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: yes

- name: get ssh keypair
  slurp:
    path: ~/.ssh/id_rsa.pub
  register: public_key
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: setup keypair on nodes
  authorized_key:
    user: "{{ ansible_user_id }}"
    key: "{{ public_key.content | b64decode }}"

- name: synchronize certificates for nodes
  synchronize:
    src: "{{ item }}"
    dest: "{{ kube_cert_dir }}"
    recursive: yes
    delete: yes
    rsync_opts: ['--one-file-system']
    set_remote_user: false
  with_items:
    - "{{ kube_cert_dir }}/ca.pem"
    - "{{ kube_cert_dir }}/node.pem"
    - "{{ kube_cert_dir }}/node-key.pem"
  delegate_to: "{{ groups['kube-master'][0] }}"
  # Masters already hold the certs; only push to the remaining nodes.
  # Fixed: compare against the group list itself — `not in "{{ ... }}"`
  # renders the list to a string and does substring matching instead of
  # list membership.
  when: inventory_hostname not in groups['kube-master']
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user