Compare commits

...

190 Commits

Author SHA1 Message Date
Smaine Kahlouch
af8f394714 update README, local connection 2016-01-08 16:04:17 +01:00
Smaine Kahlouch
eab2cec0ad fix kubectl perms 2016-01-08 16:02:40 +01:00
Smaine Kahlouch
0b17a4c00f Merge pull request #45 from jcsirot/fix-calico-systemd
Fix calico with systemd
2016-01-08 11:34:58 +01:00
ant31
f49aa90bf7 fix synchronize pull mode 2016-01-08 11:32:06 +01:00
Jean-Christophe Sirot
6f9148e994 Fix calico with systemd 2016-01-08 10:32:43 +01:00
Antoine Legrand
7c8e9dbe00 Update README.md 2016-01-08 00:36:06 +01:00
Antoine Legrand
df3d0bcc21 Build status 2016-01-08 00:20:31 +01:00
Antoine Legrand
7913d62749 Merge pull request #44 from ansibl8s/travis
Travis  tests
2016-01-07 23:46:02 +01:00
Smaine Kahlouch
d5320961e9 enforce user root when sudo is used 2016-01-05 15:33:23 +01:00
ant31
9c461e1018 Use inline update for resolv.conf 2016-01-05 12:31:49 +01:00
ant31
9a03249446 Add travis tests 2016-01-05 12:31:49 +01:00
Smaine Kahlouch
4e015dd3f1 add requirement python-netaddr 2016-01-04 17:06:26 +01:00
Smaine Kahlouch
6f53269ce3 Merge pull request #40 from ansibl8s/common
Common
2016-01-04 17:01:08 +01:00
Smaine Kahlouch
e356b2de4f Updated README 2016-01-04 17:00:40 +01:00
ant31
8fa0110e28 Remove local dep. downloader 2016-01-04 16:10:29 +01:00
Smaine Kahlouch
2a08f7bc0a inventory example, localhost for downloader 2016-01-04 14:54:30 +01:00
Smaine Kahlouch
99d16913d3 use bin_dir var in init scripts 2016-01-04 14:35:01 +01:00
Smaine Kahlouch
d172457504 sysvinit scripts 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
6103d673b7 New calico's configuration 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
29bf90a858 review handlers for sysvinit 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
2c35e4c055 Merge pull request #41 from ansibl8s/download_role
Rework download role
2015-12-31 16:22:07 +01:00
ant31
e3cdb3574a Rework download role 2015-12-31 16:12:16 +01:00
Smaine Kahlouch
15cd1bfc56 rename env file 2015-12-31 14:55:06 +01:00
Smaine Kahlouch
392570f4ff distinct local hostname 2015-12-31 14:54:51 +01:00
Smaine Kahlouch
be5fe9af54 never report changed for init system detection 2015-12-31 14:54:15 +01:00
Smaine Kahlouch
7006d56ab8 split role download and preinstall 2015-12-31 14:07:02 +01:00
Smaine Kahlouch
1695682d85 handle sysvinit 2015-12-31 14:05:55 +01:00
Smaine Kahlouch
1d1d8b9c28 add nodnsupdate hook for RedHat 2015-12-31 14:04:08 +01:00
Smaine Kahlouch
98fe2c02b2 review local tasks 2015-12-31 10:28:47 +01:00
Smaine Kahlouch
92c2a9457e rename role common to kubernetes/preinstall 2015-12-31 10:03:22 +01:00
Smaine Kahlouch
a11e0cb3d1 keep host downloader 2015-12-31 09:38:55 +01:00
Smaine Kahlouch
dbb6f4934e common role in order to support other linux distribs 2015-12-30 22:26:45 +01:00
Smaine Kahlouch
9f07f2a951 install docker on a largest number of linux distribution (based on https://github.com/marklee77/ansible-role-docker) 2015-12-30 22:26:45 +01:00
Smaine Kahlouch
005ddedb94 Merge pull request #38 from ansibl8s/dockerize_dnsmasq
[WIP] Docker dnsmasq
2015-12-30 14:04:17 +01:00
Smaine Kahlouch
b72e220126 remove carriage return 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
e0f460d9b5 copy template dnsmasq pod and remove handlers 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
2bd6b83656 increase etcd timeout value again 2015-12-30 14:02:22 +01:00
ant31
2df70d6a3d Docker dnsmasq 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
ddaeb2b8fa Merge pull request #35 from ansibl8s/dockerize_etcd
Dockerize etcd
2015-12-30 14:01:00 +01:00
Smaine Kahlouch
6f4f170a88 remove useless etcd download, runs into docker containers 2015-12-30 09:50:02 +01:00
Smaine Kahlouch
3f3b03bc99 increase timeout value for etcd wait_for 2015-12-29 21:37:17 +01:00
Smaine Kahlouch
c9d9ccf025 move network-environment template into node role, required by kubelet 2015-12-29 21:36:51 +01:00
ant31
e378f4fb14 Install calico-plugin before running calico 2015-12-28 22:04:39 +01:00
Antoine Legrand
5c15d14f12 Run etcd as pod 2015-12-28 22:04:39 +01:00
Antoine Legrand
b45747ec86 Merge pull request #37 from ansibl8s/apiserver_https
Apiserver https
2015-12-28 13:00:46 +01:00
ant31
d597f707f1 use backup file 2015-12-24 19:23:21 +01:00
Smaine Kahlouch
4388cab8d6 Use second ip address in order to avoid any ip range problem 2015-12-24 13:58:04 +01:00
Smaine Kahlouch
595e93e6da Peer with router configuration is made on the first etcd node 2015-12-24 13:56:53 +01:00
Smaine Kahlouch
5f4e01cec5 new version of logstash submodule 2015-12-22 16:38:40 +01:00
Smaine Kahlouch
7c9c609ac4 calico uses loadbalancer address for apiserver 2015-12-22 08:45:14 +01:00
Smaine Kahlouch
680864f95c don't sync certs on masters, already done in another task 2015-12-21 14:24:57 +01:00
Smaine Kahlouch
7315d33e3c use ip for etcd proxies even when hostnames are used in the inventory 2015-12-21 14:24:10 +01:00
Smaine Kahlouch
b2afbfd4fb don't touch if the file exists 2015-12-21 14:23:33 +01:00
Smaine Kahlouch
ab694ee291 Install python-httplib2 required packaged 2015-12-21 12:00:42 +01:00
Smaine Kahlouch
bba3525cd8 use loadbalancer when that's possible 2015-12-21 09:13:48 +01:00
Smaine Kahlouch
2c816f66a3 Check calico network pool 2015-12-20 16:51:14 +01:00
Smaine Kahlouch
d585ceaf3b set permissions on network-environment file 2015-12-19 12:32:06 +01:00
Smaine Kahlouch
fec1dc9041 A single file for tokens tasks 2015-12-19 11:00:22 +01:00
Smaine Kahlouch
e7e03bae9f calico talks to apiserver with https 2015-12-18 22:22:52 +01:00
Smaine Kahlouch
b81a064242 README, update inventory 2015-12-18 16:40:58 +01:00
Smaine Kahlouch
03d402e226 using hostnames in the inventory is more readable 2015-12-18 16:13:42 +01:00
Smaine Kahlouch
0a238d9853 Specify etcd servers for the etcd cluster 2015-12-18 14:31:51 +01:00
Smaine Kahlouch
4fe0ced5db README, Fix http links 2015-12-18 13:32:03 +01:00
Smaine Kahlouch
c6d65cb535 remove temporary workaround due to node reboot issue with calico 2 2015-12-18 13:25:46 +01:00
Smaine Kahlouch
a0746a3efd remove temporary workaround due to node reboot issue with calico 2015-12-18 13:22:32 +01:00
Smaine Kahlouch
46807c655d Update README 2015-12-18 13:21:22 +01:00
Smaine Kahlouch
970aab70e1 Upgrade calico version to v0.13.0, fixes the node reboot issue 2015-12-18 13:10:26 +01:00
Smaine Kahlouch
4561dd327b remove deprecated var CALICOCTL_PATH 2015-12-18 13:09:42 +01:00
Smaine Kahlouch
94c0c32752 The etcd role is run on all the servers 2015-12-18 11:29:06 +01:00
Smaine Kahlouch
b155e8cc7b Fix error in ETCD_INITIAL_CLUSTER loop 2015-12-18 11:22:56 +01:00
Smaine Kahlouch
9046b7b1bf Configure calico pool on an etcd server 2015-12-18 10:16:03 +01:00
Antoine Legrand
3c450191ea User etcd node ip in initial cluster 2015-12-17 22:47:19 +01:00
Antoine Legrand
184bb8c94d Use 0755 mode for binaries 2015-12-17 22:46:50 +01:00
Antoine Legrand
a003d91576 simplify inventory path 2015-12-17 21:32:06 +01:00
Smaine Kahlouch
9914229484 using ip address instead of inventory_hostname for kube-proxy 2015-12-17 10:43:06 +01:00
Smaine Kahlouch
b3841659d7 Review role order, use master ip even when fqdn are used in the inventory 2015-12-16 23:49:01 +01:00
Smaine Kahlouch
3a349b8519 Using var file for etcd service 2015-12-16 21:43:29 +01:00
Antoine Legrand
6e91b6f47c Merge pull request #22 from ansibl8s/ha_master
- HA (kubernetes and etcd)
- Dockerize kubenertes components (api-server/scheduler/replica-manager/proxy)
2015-12-16 18:11:50 +01:00
ant31
bf5c531037 Merge branch 'master' into ha 2015-12-16 18:09:50 +01:00
ant31
44ac355aa7 Update depedencies 2015-12-16 18:01:52 +01:00
ant31
958c770bef Update ports 2015-12-16 17:43:26 +01:00
ant31
6012230110 Merge branch 'ha_master' of https://github.com/ansibl8s/setup-kubernetes into ha 2015-12-15 17:42:01 +01:00
Smaine Kahlouch
61bb6468ef Update README, cluster.yml 2015-12-15 17:24:37 +01:00
Smaine Kahlouch
f2069b296c BGP peering and loadbalancing vars are managed in a group_vars file 2015-12-15 17:16:19 +01:00
Smaine Kahlouch
9649f2779d Commenting out loadbalancing vars 2015-12-15 17:01:29 +01:00
Smaine Kahlouch
c91a3183d3 manage undefined vars for loadbalancing 2015-12-15 16:51:55 +01:00
ant31
693230ace9 Merge branch 'ha_master' of https://github.com/ansibl8s/setup-kubernetes into ha 2015-12-15 16:28:49 +01:00
ant31
f21f660cc5 Use kube_apiserver_port 2015-12-15 16:27:12 +01:00
Smaine Kahlouch
43afd42f59 use 3 members for etcd clustering 2015-12-15 15:27:12 +01:00
Smaine Kahlouch
4d1828c724 group vars per location 2015-12-15 15:25:24 +01:00
Smaine Kahlouch
953f482585 kube-proxy loadbalancing, need an external loadbalancer 2015-12-15 15:20:08 +01:00
Smaine Kahlouch
4055980ce6 ha apiservers for kubelet 2015-12-15 13:14:27 +01:00
Smaine Kahlouch
e2984b4fdb ha etcd with calico 2015-12-15 11:49:11 +01:00
ant31
394a64f904 Add etcd in apps.yml 2015-12-14 22:42:00 +01:00
Smaine Kahlouch
2fc8b46996 etcd can run on a distinct cluster 2015-12-14 10:39:13 +01:00
Smaine Kahlouch
5efc09710b Renaming hyperkube image vars 2015-12-14 09:54:58 +01:00
Smaine Kahlouch
f908309739 update README with multi-master notes 2015-12-13 16:59:22 +01:00
Smaine Kahlouch
9862afb097 Upgrade kubernetes to v1.1.3 2015-12-13 16:41:18 +01:00
Smaine Kahlouch
59994a6df1 Quickstart documentation 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
0a1b92f348 cluster log level variable 'kube_log_level' 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
af9b945874 add the loadbalancer address to ssl certs 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3cbcd6f189 Calico uses the loadbalancer to reach etcd if 'loadbalancer_address' is defined. The loadbalancer has to be configured first 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
1568cbe8e9 optionnal api runtime extensions 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
eb4dd5f19d update kubectl bash completion 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
fd0e5e756e Update README, new versions 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
f49620517e running kubernetes master processes as pods 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
ef8a46b8c5 Doesn't manage firewall, note: has to be disabled before running the playbook 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
47c211f9c1 upgrading docker version 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
b23b8aa3de dnsmasq with multi master arch 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3981b73924 download only required kubernetes binaries 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
e0ec3e7241 Using one var file per environment is simplier 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
b66cc67b6f Configure network-environment with a single template 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
83c1105192 Configuring calico pool once, before starting calico-node 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
d9a8de487f review roles order 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
d1e19563b0 Master and nodes will run the 'node' role, kube-proxy is run under a container, new script for ssl certs 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3014dfef24 Clustering etcd for ha masters 2015-12-12 19:37:08 +01:00
ant31
b92fa01e05 Remove etcd dir 2015-12-10 23:17:12 +01:00
ant31
e3ebc8e009 Add Rabbitmq 2015-12-10 20:47:59 +01:00
ant31
625efc85af Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes 2015-12-10 20:47:15 +01:00
ant31
d30474d305 Add k8s-etcd 2015-12-10 20:46:33 +01:00
Smaine Kahlouch
9cecc30b6d changing proxy mode to default 'userspace', issues with 'iptables' 2015-12-09 15:03:57 +01:00
Smaine Kahlouch
563be70728 disable bgp for master 2015-12-03 15:38:44 +01:00
Smaine Kahlouch
a03f3739dc Add kubectl bash completion, missing script 2015-12-01 15:45:31 +01:00
Smaine Kahlouch
bfe78848fa Add kubectl bash completion 2015-12-01 12:13:22 +01:00
Smaine Kahlouch
126d4e36c8 Fix kube-proxy on master 2015-11-30 16:41:22 +01:00
Smaine Kahlouch
97c4edc028 Add api runtime config option, review kubernetes handlers 2015-11-27 12:32:31 +01:00
Smaine Kahlouch
f74c195d47 updated submodule postgres 2015-11-26 14:16:49 +01:00
Smaine Kahlouch
2374878ef7 Useless tag 'apps' 2015-11-26 09:37:39 +01:00
Smaine Kahlouch
b9e56dd435 Update postgres submodule 2015-11-26 09:34:37 +01:00
ant31
ede5f9592a Add kube-logstash submodule 2015-11-25 14:49:20 +01:00
ant31
a6137b3aee kube-logstash 2015-11-25 14:47:05 +01:00
Smaine Kahlouch
da3920496d add missing vars file 2015-11-24 16:55:53 +01:00
Smaine Kahlouch
895a02e274 change calico pool configuration order 2015-11-22 22:32:45 +01:00
Smaine Kahlouch
b4b20c9dbc Update readme, inventory ex 2015-11-22 18:25:36 +01:00
Smaine Kahlouch
fe8eff07d3 finalize merge kube_1.1.2 2015-11-22 18:15:45 +01:00
Smaine Kahlouch
941cae2a4c README update, 1 distinct playbook for apps 2015-11-22 18:07:52 +01:00
Smaine Kahlouch
4a9a82ca86 include kubernetes config 2015-11-22 18:04:50 +01:00
Smaine Kahlouch
d2ac5ac54b Update requirements.yml file 2015-11-22 18:01:25 +01:00
Smaine Kahlouch
4c2f757fe8 Add kubedash and monitoring submodule 2015-11-22 18:01:25 +01:00
Smaine Kahlouch
e701c3d49d Update README with the current calico version 2015-11-22 13:37:27 +01:00
Smaine Kahlouch
5762d8f301 upgrade flannel and etcd version 2015-11-22 13:35:00 +01:00
Smaine Kahlouch
9a278bae00 Update README with the latest version and simply inventory 2015-11-22 13:34:29 +01:00
Smaine Kahlouch
d3f35e12a2 Simplify docker role, cbr0 for calico isn't required anymore 2015-11-22 13:33:13 +01:00
Smaine Kahlouch
d7b7db34fa move task service kube-api to the end of role master 2015-11-21 17:01:43 +01:00
Smaine Kahlouch
4dd85b5078 move task service kube-api to the end of role master 2015-11-21 17:00:41 +01:00
Antoine Legrand
7f73bb5522 Keep workaround 2015-11-21 14:04:42 +01:00
Smaine Kahlouch
795ce8468d Calico systemd unit improvement (status, stop) 2015-11-21 13:20:39 +01:00
ant31
fb6dd60f52 Rollback 1.8.3 docker 2015-11-20 16:49:02 +01:00
Smaine Kahlouch
e427591545 upgrade kubernetes version to 1.1.2 2015-11-20 16:48:50 +01:00
ant31
9b8c89ebb0 Simplify inventory 2015-11-20 14:31:49 +01:00
ant31
323155b0e1 Fix docker 2015-11-20 14:04:13 +01:00
ant31
f368faf66b Remove --kube-plugin-version 2015-11-20 11:56:16 +01:00
ant31
8fa7811b63 Remove workaround 2015-11-20 11:36:32 +01:00
ant31
c352df6fc8 Add Backup 2015-11-20 11:18:37 +01:00
Smaine Kahlouch
34419d6bae README update, 1 distinct playbook for apps 2015-11-20 11:01:50 +01:00
Smaine Kahlouch
d94bc8e599 Merge pull request #13 from ansibl8s/separate_apps_playbook
Separate apps deploy from cluster deploy
2015-11-20 10:54:46 +01:00
Antoine Legrand
57e1831f78 Update calico to 0.11.0 2015-11-20 10:38:39 +01:00
ant31
1a0208f448 Separate apps deploy from cluster deploy 2015-11-19 22:49:02 +01:00
Smaine Kahlouch
5319f23e73 include kubernetes config 2015-11-18 22:36:56 +01:00
Smaine Kahlouch
b45261b763 remove duplicate task 2015-11-18 21:38:27 +01:00
Smaine Kahlouch
10ade2cbdc Update requirements.yml file 2015-11-18 16:00:47 +01:00
Smaine Kahlouch
471dad44b6 Add kubedash and monitoring submodule 2015-11-18 15:56:13 +01:00
Smaine Kahlouch
3f411bffe4 include config file into systemd unit file 2015-11-16 22:22:19 +01:00
Smaine Kahlouch
5cc29b77aa add option proxy mode iptables for better performances 2015-11-16 22:21:17 +01:00
Smaine Kahlouch
70aa68b9c7 move task network-environment 2015-11-16 22:20:41 +01:00
Smaine Kahlouch
7efaf30d36 update calico-node command line for version 0.10.0 2015-11-16 22:19:19 +01:00
Smaine Kahlouch
0b164bec02 add option proxy mode iptables for better performances 2015-11-16 22:17:21 +01:00
Smaine Kahlouch
3f8f0f550b remove duplicate task 2015-11-16 22:16:36 +01:00
Smaine Kahlouch
d6a790ec46 default docker template condition 2015-11-16 22:15:43 +01:00
Smaine Kahlouch
8eef0db3ec upgrade binaries version 2015-11-16 22:15:12 +01:00
Smaine Kahlouch
2b3543d0ee Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes 2015-11-02 13:46:23 +01:00
Smaine Kahlouch
c997860e1c move vars for api socket into group_vars 2015-11-02 13:46:08 +01:00
Smaine Kahlouch
27b0980622 Merge pull request #11 from ansibl8s/replace_default_ipv4_by_var
Add IP var
2015-11-02 13:41:55 +01:00
Smaine Kahlouch
3fb9101e40 default value for 'peer_with_router' 2015-11-02 13:41:03 +01:00
ant31
3bf74530ce Add IP var 2015-11-01 11:12:12 +01:00
Smaine Kahlouch
f6e4cc530c manage default value for 'peer_with_router' var 2015-10-30 16:18:39 +01:00
Smaine Kahlouch
e85fb0460e change docker version in the README 2015-10-28 10:49:09 +01:00
Smaine Kahlouch
f0eb963f5e Tag v1.0 of redis 2015-10-28 10:44:38 +01:00
Smaine Kahlouch
f216302f95 Calico is not a network overlay 2015-10-27 15:49:07 +01:00
Smaine Kahlouch
b98227e9a4 update submodules postgres and kubedns with changes 2015-10-23 16:39:15 +02:00
Smaine Kahlouch
f27a3f047f Update playbook example on README 2015-10-23 16:38:09 +02:00
Smaine Kahlouch
8e585cfdfe agencing vars into submodules 2015-10-23 09:54:44 +02:00
Smaine Kahlouch
0af0a3517f Running apps after cluster setup, update README 2015-10-21 14:05:02 +02:00
Smaine Kahlouch
73e240c644 Running apps after cluster setup 2015-10-21 14:03:39 +02:00
Smaine Kahlouch
533fe3b8e6 Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes 2015-10-20 10:19:06 +02:00
Smaine Kahlouch
95403e9d93 Update README 2015-10-20 10:18:30 +02:00
Smaine Kahlouch
250ed9d56b change skydns to kubedns in the requirements 2015-10-19 14:40:16 +02:00
Smaine Kahlouch
6381e75769 move k8s-postgres tag 2015-10-19 11:11:40 +02:00
Smaine Kahlouch
71e4b185c5 duplicate kubedns in .gitmodules 2015-10-18 22:38:14 +02:00
Smaine Kahlouch
a3c5be2c9d tag first version of apps 2015-10-18 22:32:33 +02:00
135 changed files with 21860 additions and 1569 deletions

37
.gitmodules vendored

@@ -1,30 +1,49 @@
[submodule "roles/apps/k8s-kube-ui"]
path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git
[submodule "roles/apps/k8s-skydns"]
path = roles/apps/k8s-skydns
url = https://github.com/ansibl8s/k8s-skydns.git
branch = v1.0
[submodule "roles/apps/k8s-kubedns"]
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
[submodule "roles/apps/k8s-common"]
path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
[submodule "roles/apps/k8s-redis"]
path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"]
path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"]
path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
[submodule "roles/apps/k8s-memcached"]
path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git
[submodule "roles/apps/k8s-haproxy"]
path = roles/apps/k8s-haproxy
url = https://github.com/ansibl8s/k8s-haproxy.git
branch = v1.0
[submodule "roles/apps/k8s-postgres"]
path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git
[submodule "roles/apps/k8s-kubedns"]
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
[submodule "roles/apps/k8s-kubedash"]
path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git
[submodule "roles/apps/k8s-etcd"]
path = roles/apps/k8s-etcd
url = https://github.com/ansibl8s/k8s-etcd.git
[submodule "roles/apps/k8s-rabbitmq"]
path = roles/apps/k8s-rabbitmq
url = https://github.com/ansibl8s/k8s-rabbitmq.git

41
.travis.yml Normal file

@@ -0,0 +1,41 @@
sudo: required
dist: trusty
language: python
python: "2.7"
addons:
hosts:
- node1
env:
- SITE=cluster.yml
before_install:
- sudo apt-get update -qq
install:
# Install Ansible.
- sudo -H pip install ansible
- sudo -H pip install netaddr
cache:
directories:
- $HOME/releases
- $HOME/.cache/pip
before_script:
- export PATH=$PATH:/usr/local/bin
script:
# Check the role/playbook's syntax.
- "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --syntax-check"
# Run the role/playbook with ansible-playbook.
- "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local"
# Run the role/playbook again, checking to make sure it's idempotent.
- >
sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local
| tee /dev/stderr | grep -q 'changed=0.*failed=0'
&& (echo 'Idempotence test: pass' && exit 0)
|| (echo 'Idempotence test: fail' && exit 1)
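The same checks can be reproduced locally before pushing (a minimal sketch, assuming Ansible and python-netaddr are installed and the commands are run from the repository root; $SITE resolves to cluster.yml):

```
# Syntax check only
sudo -H ansible-playbook -i inventory/local-tests.cfg cluster.yml --syntax-check

# Full local run, then a second run to verify idempotence (expects changed=0 and failed=0)
sudo -H ansible-playbook -i inventory/local-tests.cfg cluster.yml --connection=local
sudo -H ansible-playbook -i inventory/local-tests.cfg cluster.yml --connection=local \
  | tee /dev/stderr | grep -q 'changed=0.*failed=0'
```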

229
README.md

@@ -1,36 +1,104 @@
[![Build Status](https://travis-ci.org/ansibl8s/setup-kubernetes.svg)](https://travis-ci.org/ansibl8s/setup-kubernetes)
kubernetes-ansible
========
Install and configure a kubernetes cluster including network overlay and optional addons.
Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.
Install and configure a Multi-Master/HA kubernetes cluster including a network plugin.
### Requirements
Tested on **Debian Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
The target servers must have access to the Internet in order to pull docker images.
The firewalls are not managed; you'll need to implement your own rules as usual.
Ansible v1.9.x
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
Should work on **RedHat/Fedora/CentOS** platforms (to be tested)
* The target servers must have access to the Internet in order to pull docker images.
* The firewalls are not managed; you'll need to implement your own rules as usual.
* Ansible v1.9.x and python-netaddr
### Components
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.0.6
* [etcd](https://github.com/coreos/etcd/releases) v2.2.0
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.5.1
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.3
* [docker](https://www.docker.com/) v1.8.2
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.13.0
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
* [docker](https://www.docker.com/) v1.9.1
Quickstart
-------------------------
The following steps will quickly set up a kubernetes cluster with the default configuration.
These defaults are fine for testing purposes.
Edit the inventory according to the number of servers:
```
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
10.115.99.31
[etcd]
10.115.99.31
10.115.99.32
10.115.99.33
[kube-node]
10.115.99.32
10.115.99.33
[k8s-cluster:children]
kube-node
kube-master
```
Run the playbook
```
ansible-playbook -i inventory/inventory.cfg cluster.yml -u root
```
You can jump directly to "*Available apps, installation procedure*"
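Once the playbook has finished, a quick sanity check can be run from a master node (a sketch; kubectl is installed there by the playbook):

```
kubectl get nodes
kubectl cluster-info
```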
Ansible
-------------------------
### Download binaries
A role downloads the required binaries, which are stored in a directory defined by the variable
**'local_release_dir'** (/tmp by default).
Please ensure that you have enough disk space there (about **1G**).
**Note**: whenever you need to change the version of a component, you'll have to erase the contents of this directory first.
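If needed, the directory can be overridden for a single run with an extra var (the path shown is illustrative):

```
ansible-playbook -i inventory/inventory.cfg cluster.yml -u root -e local_release_dir=/opt/releases
```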
### Variables
The main variables to change are located in the file ```environments/[env_name]/group_vars/k8s-cluster.yml```.
The main variables to change are located in the file ```inventory/group_vars/all.yml```.
### Inventory
Below is an example of an inventory.
Note: the bgp vars local_as and peers are not mandatory if the var **'peer_with_router'** is set to false.
By default this variable is set to false, so all the nodes are configured in **'node-mesh'** mode.
In node-mesh mode each node peers with all the other nodes in order to exchange routes.
```
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
[etcd]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
[kube-node]
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37
[paris]
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
[k8s-cluster:children]
kube-node
kube-master
```
### Playbook
```
@@ -42,66 +110,72 @@ The main variables to change are located in the directory ```environments/[env_n
- hosts: k8s-cluster
roles:
- { role: etcd, tags: etcd }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
- { role: kubernetes/node, tags: node }
- { role: etcd, tags: etcd }
- { role: dnsmasq, tags: dnsmasq }
- { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
- { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
- hosts: kube-node
roles:
- { role: kubernetes/node, tags: node }
```
### Run
It is possible to define variables for different environments.
For instance, in order to deploy the cluster on the 'dev' environment, run the following command.
```
ansible-playbook -i environments/dev/inventory cluster.yml -u root
ansible-playbook -i inventory/dev/inventory.cfg cluster.yml -u root
```
Kubernetes
-------------------------
### Multi master notes
* You can choose where to install the master components. If you want your master node to act both as master (api, scheduler, controller) and node (e.g. accept workloads, create pods ...),
the server address has to be present in both groups 'kube-master' and 'kube-node'.
* Almost all kubernetes components run in pods, except *kubelet*. These pods are managed by kubelet, which ensures they're always running.
* For safety reasons, you should have at least two master nodes and three etcd servers.
* Kube-proxy doesn't support multiple apiservers on startup ([Issue 18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
In order to do so, set the variables '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**', as shown in the sketch below.
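A minimal sketch of these two variables in a group_vars file (the address and port are placeholders taken from the inventory example; the loadbalancer itself must be configured separately, see the haproxy example in ```inventory/group_vars/all.yml```):

```
# group_vars excerpt - placeholder values
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: "10.99.0.21"
  port: "8383"
```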
### Network Overlay
You can choose between 2 network overlays. Only one must be chosen.
You can choose between 2 network plugins. Only one must be chosen.
* **flannel**: gre/vxlan (layer 2) networking. ([official docs]('https://github.com/coreos/flannel'))
* **flannel**: gre/vxlan (layer 2) networking. ([official docs](https://github.com/coreos/flannel))
* **calico**: bgp (layer 3) networking. ([official docs]('http://docs.projectcalico.org/en/0.13/'))
* **calico**: bgp (layer 3) networking. ([official docs](http://docs.projectcalico.org/en/0.13/))
The choice is defined with the variable '**overlay_network_plugin**'
The choice is defined with the variable '**kube_network_plugin**'.
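For example, in the cluster group_vars (matching the default shipped in ```inventory/group_vars/all.yml```):

```
kube_network_plugin: calico   # or "flannel"
```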
### Expose a service
There are several loadbalancing solutions.
The ones I found suitable for kubernetes are [Vulcand]('http://vulcand.io/') and [Haproxy]('http://www.haproxy.org/')
The ones I found suitable for kubernetes are [Vulcand](http://vulcand.io/) and [Haproxy](http://www.haproxy.org/)
My cluster is working with haproxy, and kubernetes services are configured with the loadbalancing type '**nodePort**',
e.g. each node opens the same tcp port and forwards the traffic to the target pod wherever it is located.
Haproxy can then be configured to query kubernetes's api in order to loadbalance on the proper tcp port on the nodes.
Please refer to the proper kubernetes documentation on [Services]('https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md')
Please refer to the proper kubernetes documentation on [Services](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md)
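A hedged sketch of such a service manifest (the name and ports are illustrative, not part of this repo):

```
apiVersion: v1
kind: Service
metadata:
  name: myapp
spec:
  type: NodePort
  selector:
    app: myapp
  ports:
  - port: 80          # cluster-internal service port
    targetPort: 8080  # container port on the target pods
    nodePort: 30080   # the tcp port opened on every node
```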
### Check cluster status
#### Kubernetes components
Master processes : kube-apiserver, kube-scheduler, kube-controller, kube-proxy
Nodes processes : kubelet, kube-proxy, [calico-node|flanneld]
* Check the status of the processes
```
systemctl status [process_name]
systemctl status kubelet
```
* Check the logs
```
journalctl -ae -u [process_name]
journalctl -ae -u kubelet
```
* Check the NAT rules
@@ -109,15 +183,26 @@ journalctl -ae -u [process_name]
iptables -nLv -t nat
```
For the master nodes, you'll have to check the docker logs of the apiserver
```
docker logs [apiserver docker id]
```
### Available apps, installation procedure
There are two ways of installing new apps.
#### Ansible galaxy
#### Available apps, installation procedure
Additional apps can be installed with ```ansible-galaxy```.
You'll need to edit the file '*requirements.yml*' in order to choose the needed apps.
The list of available apps is available [there](https://github.com/ansibl8s)
For instance, you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns), as it is **strongly recommended**.
For instance, it is **strongly recommended** to install a dns server which resolves kubernetes service names.
In order to use this role you'll need the following entries in the file '*requirements.yml*'.
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
```
- src: https://github.com/ansibl8s/k8s-common.git
path: roles/apps
@@ -139,16 +224,34 @@ Then download the roles with ansible-galaxy
ansible-galaxy install -r requirements.yml
```
Finally update your playbook with the chosen role, and run it
#### Git submodules
Alternatively, the roles can be installed as git submodules.
That way is easier if you want to make some changes and commit them.
You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```
In order to install the dns addon you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
...
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
...
```
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
```
ansible-playbook -i inventory/inventory.cfg apps.yml -u root
```
#### Calico networking
Check if the calico-node container is running
@@ -173,38 +276,4 @@ calicoctl endpoint show --detail
```
#### Flannel networking
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.0/basicstutorials.html)
Known issues
-------------
### Node reboot and Calico
There is a major issue with calico-kubernetes version 0.5.1 and kubernetes prior to 1.1:
after a host reboot, pod networking is not configured again; the pods are started without any network configuration.
This issue will be fixed when kubernetes 1.1 is released, as described in this [issue](https://github.com/projectcalico/calico-kubernetes/issues/34)
### Monitoring addon
So far I haven't managed to get the monitoring addon working.
### Apiserver listen on secure port only
Currently the api-server listens on both secure and insecure ports.
The insecure port is mainly used for calico.
This will be fixed soon.
How to contribute
------------------
### Update available roles
Alternatively, the roles can be installed as git submodules.
That way is easier if you want to make some changes and commit them.
You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```
For instance, you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns), as it is **strongly recommended**.
In order to use this role you'll need to follow these steps:
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)

29
apps.yml Normal file

@@ -0,0 +1,29 @@
---
- hosts: kube-master
roles:
# System
- { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] }
# Databases
- { role: apps/k8s-postgres, tags: 'postgres' }
- { role: apps/k8s-elasticsearch, tags: 'elasticsearch' }
- { role: apps/k8s-memcached, tags: 'memcached' }
- { role: apps/k8s-redis, tags: 'redis' }
# Msg Broker
- { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }
# Monitoring
- { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']}
- { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']}
- { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']}
# logging
- { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}
# Console
- { role: apps/k8s-fabric8, tags: 'fabric8' }
- { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']}
# ETCD
- { role: apps/k8s-etcd, tags: 'etcd'}
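Since every app is tagged, a subset can be deployed by reusing those tags (a sketch, based on the run command from the README):

```
# Only the kube-system apps (dns, ui, monitoring)
ansible-playbook -i inventory/inventory.cfg apps.yml -u root --tags kube-system

# A single app
ansible-playbook -i inventory/inventory.cfg apps.yml -u root --tags postgres
```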


@@ -6,18 +6,13 @@
- hosts: k8s-cluster
roles:
- { role: etcd, tags: etcd }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
- { role: kubernetes/node, tags: node }
- { role: etcd, tags: etcd }
- { role: dnsmasq, tags: dnsmasq }
- { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
# Apps to be installed
# - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
# - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
- hosts: kube-node
roles:
- { role: kubernetes/node, tags: node }


@@ -1,6 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"


@@ -1,36 +0,0 @@
[downloader]
172.16.0.1
[kube-master]
# NB : the br_addr must be in the {{ calico_pool }} subnet
# it will assign a /24 subnet per node
172.16.0.1 br_addr=10.233.64.1
[etcd]
172.16.0.1
[kube-node:children]
usa
france
[usa]
172.16.0.1 br_addr=10.233.64.1
# Configure the AS assigned to each node if bgp peering with border routers is enabled
172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
[france]
192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
[k8s-cluster:children]
kube-node
kube-master
# If you want to configure bgp peering with border router you'll need to set the following vars
# List of routers and their as number
#[usa:vars]
#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
#
#[france:vars]
#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]


@@ -1,6 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"


@@ -1,57 +0,0 @@
# Users to create for basic auth in Kubernetes API via HTTP
# kube_users:
# kube:
# pass: changeme
# role: admin
# root:
# pass: changeme
# role: admin
# Kubernetes cluster name, also will be used as DNS domain
# cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
# overlay_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
# kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
# overlay_network_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire overlay network. So the entirety of 4.0.0.0/16 must be
# unused in your environment.
# overlay_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
# overlay_network_host_prefix: 24
# With calico it is possible to distribute routes with border routers of the datacenter.
# peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq
# upstream_dns_servers:
# - 8.8.8.8
# - 4.4.8.8
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
# dns_setup: true
# dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes dns service
# dns_server: 10.233.0.10


@@ -1,17 +1,27 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
kube:
pass: changeme
role: admin
root:
pass: changeme
role: admin
# root:
# pass: changeme
# role: admin
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
overlay_network_plugin: calico
kube_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
@@ -19,23 +29,27 @@ kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
overlay_network_subnet: 10.233.64.0/18
kube_pods_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire overlay network. So the entirety of 4.0.0.0/16 must be
# unused in your environment.
# overlay_network_prefix: 18
# entire network. Must be unused in your environment.
# kube_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
overlay_network_host_prefix: 24
kube_network_node_prefix: 24
# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
@@ -48,10 +62,25 @@ peer_with_router: false
upstream_dns_servers:
- 8.8.8.8
- 4.4.8.8
# Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes dns service
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
# Ip address of the kubernetes dns service
dns_server: 10.233.0.10
# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy :
# listen kubernetes-apiserver-https
# bind 10.99.0.21:8383
# option ssl-hello-chk
# mode tcp
# timeout client 3h
# timeout server 3h
# server master1 10.99.0.26:443
# server master2 10.99.0.27:443
# balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
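As an aside, the ```ipaddr``` chain above simply picks the first usable address of the service range: with the default ```kube_service_addresses: 10.233.0.0/18```, ```kube_apiserver_ip``` resolves to 10.233.0.1, and the dns service at 10.233.0.10 sits in the same range. This can be checked ad hoc on the control machine (assuming python-netaddr is installed):

```
ansible localhost -m debug -a "msg={{'10.233.0.0/18'|ipaddr('net')|ipaddr(1)|ipaddr('address')}}"
# => "msg": "10.233.0.1"
```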


@@ -0,0 +1,10 @@
#---
#peers:
# -router_id: "10.99.0.34"
# as: "65xxx"
# - router_id: "10.99.0.35"
# as: "65xxx"
#
#loadbalancer_apiserver:
# address: "10.99.0.44"
# port: "8383"


@@ -0,0 +1,10 @@
#---
#peers:
# -router_id: "10.99.0.2"
# as: "65xxx"
# - router_id: "10.99.0.3"
# as: "65xxx"
#
#loadbalancer_apiserver:
# address: "10.99.0.21"
# port: "8383"


@@ -0,0 +1,32 @@
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
[etcd]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
[kube-node]
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37
[paris]
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
[k8s-cluster:children]
kube-node
kube-master

17
inventory/local-tests.cfg Normal file

@@ -0,0 +1,17 @@
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
[downloader]
node1
[kube-master]
node1
[etcd]
node1
[kube-node]
node1
[k8s-cluster:children]
kube-node
kube-master


@@ -1,19 +1,19 @@
---
- src: https://github.com/ansibl8s/k8s-common.git
path: roles/apps
# version: v1.0
version: v1.0
- src: https://github.com/ansibl8s/k8s-skydns.git
- src: https://github.com/ansibl8s/k8s-kubedns.git
path: roles/apps
# version: v1.0
version: v1.0
#- src: https://github.com/ansibl8s/k8s-kube-ui.git
# path: roles/apps
# # version: v1.0
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-fabric8.git
# path: roles/apps
# # version: v1.0
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-elasticsearch.git
# path: roles/apps
@@ -25,12 +25,17 @@
#
#- src: https://github.com/ansibl8s/k8s-memcached.git
# path: roles/apps
# # version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-haproxy.git
# path: roles/apps
# # version: v1.0
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-postgres.git
# path: roles/apps
# # version: v1.0
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-heapster.git
# path: roles/apps
#
#- src: https://github.com/ansibl8s/k8s-influxdb.git
# path: roles/apps
#
#- src: https://github.com/ansibl8s/k8s-kubedash.git
# path: roles/apps

1
roles/apps/k8s-etcd Submodule

Submodule roles/apps/k8s-etcd added at abd61ee91a


@@ -1,3 +0,0 @@
---
- name: restart dnsmasq
command: systemctl restart dnsmasq


@@ -5,54 +5,97 @@
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
state: present
backup: yes
when: hostvars[item].ansible_default_ipv4.address is defined
with_items: groups['all']
- name: populate kubernetes loadbalancer address into hosts file
lineinfile:
dest: /etc/hosts
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local"
state: present
backup: yes
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
- name: clean hosts file
lineinfile:
dest: /etc/hosts
regexp: "{{ item }}"
state: absent
backup: yes
with_items:
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
- '^::1(\s+){{ inventory_hostname }}.*'
- name: install dnsmasq and bind9utils
apt:
name: "{{ item }}"
state: present
with_items:
- dnsmasq
- bind9utils
when: inventory_hostname in groups['kube-master'][0]
- name: ensure dnsmasq.d directory exists
file:
path: /etc/dnsmasq.d
state: directory
when: inventory_hostname in groups['kube-master'][0]
when: inventory_hostname in groups['kube-master']
- name: configure dnsmasq
template:
src: 01-kube-dns.conf.j2
dest: /etc/dnsmasq.d/01-kube-dns.conf
mode: 755
notify:
- restart dnsmasq
when: inventory_hostname in groups['kube-master'][0]
backup: yes
when: inventory_hostname in groups['kube-master']
- name: enable dnsmasq
service:
name: dnsmasq
state: started
enabled: yes
when: inventory_hostname in groups['kube-master'][0]
- name: create dnsmasq pod template
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
when: inventory_hostname in groups['kube-master']
- name: update resolv.conf with new DNS setup
template:
src: resolv.conf.j2
dest: /etc/resolv.conf
mode: 644
- name: Check for dnsmasq port
wait_for:
port: 53
delay: 5
timeout: 100
when: inventory_hostname in groups['kube-master']
- name: check resolvconf
stat: path=/etc/resolvconf/resolv.conf.d/head
register: resolvconf
- name: target resolv.conf file
set_fact:
resolvconffile: >
{%- if resolvconf.stat.exists == True -%}
/etc/resolvconf/resolv.conf.d/head
{%- else -%}
/etc/resolv.conf
{%- endif -%}
- name: Add search resolv.conf
lineinfile:
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
dest: "{{resolvconffile}}"
state: present
insertafter: EOF
backup: yes
follow: yes
- name: Add all masters as nameserver
lineinfile:
line: nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }}
dest: "{{resolvconffile}}"
state: present
insertafter: EOF
backup: yes
follow: yes
with_items: groups['kube-master']
- name: disable resolv.conf modification by dhclient
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x backup=yes
when: ansible_os_family == "Debian"
- name: disable resolv.conf modification by dhclient
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
when: ansible_os_family == "RedHat"
- name: update resolvconf
command: resolvconf -u
changed_when: False
when: resolvconf.stat.exists == True
- meta: flush_handlers
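After the role has run, the resulting DNS chain can be verified by hand (a sketch; ```dig``` comes from dnsutils/bind-utils, the address is node1 from the inventory example, and the service record assumes the kubedns app has been deployed):

```
# Ask the dnsmasq instance on a master directly
dig @10.99.0.26 kubernetes.default.svc.cluster.local +short

# Check that the generated search line landed in resolv.conf
grep '^search' /etc/resolv.conf
```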


@@ -0,0 +1,49 @@
---
apiVersion: v1
kind: Pod
metadata:
name: dnsmasq
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: dnsmasq
image: andyshinn/dnsmasq:2.72
command:
- dnsmasq
args:
- -k
- "-7"
- /etc/dnsmasq.d
- --local-service
securityContext:
capabilities:
add:
- NET_ADMIN
imagePullPolicy: Always
resources:
limits:
cpu: 100m
memory: 256M
ports:
- name: dns
containerPort: 53
hostPort: 53
protocol: UDP
- name: dns-tcp
containerPort: 53
hostPort: 53
protocol: TCP
volumeMounts:
- name: etcdnsmasqd
mountPath: /etc/dnsmasq.d
- name: etcdnsmasqdavailable
mountPath: /etc/dnsmasq.d-available
volumes:
- name: etcdnsmasqd
hostPath:
path: /etc/dnsmasq.d
- name: etcdnsmasqdavailable
hostPath:
path: /etc/dnsmasq.d-available


@@ -1,5 +0,0 @@
; generated by ansible
search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
{% for host in groups['kube-master'] %}
nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
{% endfor %}

2
roles/docker/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.*.swp
.vagrant


@@ -1,17 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
[Service]
EnvironmentFile=-/etc/default/docker
Type=notify
ExecStart=/usr/bin/docker daemon -H fd:// $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target


@@ -1,12 +0,0 @@
---
- name: restart docker
command: /bin/true
notify:
- reload systemd
- restart docker service
- name: reload systemd
shell: systemctl daemon-reload
- name: restart docker service
service: name=docker state=restarted


@@ -1,33 +0,0 @@
---
- name: Write script for calico/docker bridge configuration
template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: Configure calico/docker bridge
shell: /etc/network/if-up.d/create_cbr
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: Configure docker to use cbr0 bridge
lineinfile:
dest=/etc/default/docker
regexp='.*DOCKER_OPTS=.*'
line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
notify:
- restart docker
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: enable docker
service:
name: docker
enabled: yes
state: started
tags:
- docker
- meta: flush_handlers
#- name: login to arkena's docker registry
# shell : >
# docker login --username={{ dockerhub_user }}
# --password={{ dockerhub_pass }}
# --email={{ dockerhub_email }}


@@ -1,24 +0,0 @@
---
- name: Install prerequisites for https transport
apt: pkg={{ item }} state=present update_cache=yes
with_items:
- apt-transport-https
- ca-certificates
- name: Configure docker apt repository
template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
- name: Install docker-engine
apt: pkg={{ item }} state=present force=yes update_cache=yes
with_items:
- aufs-tools
- cgroupfs-mount
- docker-engine=1.8.2-0~{{ ansible_distribution_release }}
- name: Copy default docker configuration
template: src=default-docker.j2 dest=/etc/default/docker
notify: restart docker
- name: Copy Docker systemd unit file
copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
notify: restart docker


@@ -1,3 +1,53 @@
---
- include: install.yml
- include: configure.yml
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
- name: check for minimum kernel version
fail:
msg: >
docker requires a minimum kernel version of
{{ docker_kernel_min_version }} on
{{ ansible_distribution }}-{{ ansible_distribution_version }}
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
- name: ensure docker requirements packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args: docker_package_info.args
with_items: docker_package_info.pre_pkgs
when: docker_package_info.pre_pkgs|length > 0
- name: ensure docker repository public key is installed
action: "{{ docker_repo_key_info.pkg_key }}"
args: docker_repo_key_info.args
with_items: docker_repo_key_info.repo_keys
when: docker_repo_key_info.repo_keys|length > 0
- name: ensure docker repository is enabled
action: "{{ docker_repo_info.pkg_repo }}"
args: docker_repo_info.args
with_items: docker_repo_info.repos
when: docker_repo_info.repos|length > 0
- name: ensure docker packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args: docker_package_info.args
with_items: docker_package_info.pkgs
when: docker_package_info.pkgs|length > 0
- name: ensure docker service is started and enabled
service:
name: "{{ item }}"
enabled: yes
state: started
with_items:
- docker


@@ -1,14 +0,0 @@
#!/bin/bash
# Create calico bridge cbr0 if it doesn't exist
ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
if ! [[ "${ifaces}" =~ "cbr0" ]];then
brctl addbr cbr0
ip link set cbr0 up
fi
# Configure calico bridge ip
br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
fi


@@ -1,15 +0,0 @@
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
{% endif %}
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"


@@ -1 +0,0 @@
deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main


@@ -0,0 +1,24 @@
docker_kernel_min_version: '2.6.32-431'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- epel-release
- curl
- device-mapper-libs
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@@ -0,0 +1,36 @@
docker_kernel_min_version: '3.2'
docker_package_info:
pkg_mgr: apt
args:
pkg: "{{ item }}"
update_cache: yes
cache_valid_time: 600
state: latest
pre_pkgs:
- apt-transport-https
- curl
- software-properties-common
pkgs:
- docker-engine
docker_repo_key_info:
pkg_key: apt_key
args:
id: "{{ item }}"
keyserver: hkp://p80.pool.sks-keyservers.net:80
state: present
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
docker_repo_info:
pkg_repo: apt_repository
args:
repo: "{{ item }}"
update_cache: yes
state: present
repos:
- >
deb https://apt.dockerproject.org/repo
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main


@@ -0,0 +1,22 @@
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- curl
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@@ -1,4 +0,0 @@
---
#dockerhub_user:
#dockerhub_pass:
#dockerhub_email:


@@ -0,0 +1,22 @@
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- curl
pkgs:
- docker
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@@ -1,5 +1,42 @@
---
etcd_download_url: https://github.com/coreos/etcd/releases/download
flannel_download_url: https://github.com/coreos/flannel/releases/download
kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
local_release_dir: /tmp
flannel_version: 0.5.5
calico_version: v0.13.0
calico_plugin_version: v0.7.0
kube_version: v1.1.3
kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d"
kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482"
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
downloads:
  - name: calico
    dest: calico/bin/calicoctl
    url: "{{calico_download_url}}"
  - name: calico-plugin
    dest: calico/bin/calico
    url: "{{calico_plugin_download_url}}"
  - name: flannel
    dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
    url: "{{flannel_download_url}}"
    unarchive: yes
  - name: kubernetes-kubelet
    dest: kubernetes/bin/kubelet
    sha256: "{{kubelet_checksum}}"
    url: "{{ kube_download_url }}/kubelet"
  - name: kubernetes-kubectl
    dest: kubernetes/bin/kubectl
    sha256: "{{kubectl_checksum}}"
    url: "{{ kube_download_url }}/kubectl"

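Every `downloads` entry uses the same keys: a `name`, a `dest` relative to local_release_dir, a `url`, plus optional `sha256` and `unarchive`. A hypothetical extra entry would follow the same shape (URL and checksum are placeholders, not part of this changeset):

# hypothetical entry, for illustration only
- name: example-tool
  dest: example-tool/bin/example-tool
  sha256: "<expected-sha256-of-the-binary>"
  url: "https://example.com/releases/example-tool"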
View File

@@ -1,21 +0,0 @@
---
- name: Create calico release directory
  local_action: file
    path={{ local_release_dir }}/calico/bin
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if calicoctl has been downloaded
  local_action: stat
    path={{ local_release_dir }}/calico/bin/calicoctl
  register: c_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download calico
  local_action: shell
    curl -o {{ local_release_dir }}/calico/bin/calicoctl -Ls {{ calico_download_url }}/{{ calico_version }}/calicoctl
  when: not c_tar.stat.exists
  register: dl_calico
  delegate_to: "{{ groups['kube-master'][0] }}"

View File

@@ -1,42 +0,0 @@
---
- name: Create etcd release directory
  local_action: file
    path={{ local_release_dir }}/etcd/bin
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if etcd release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
  register: e_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download etcd
  local_action: shell
    curl -o {{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz -Ls {{ etcd_download_url }}/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
  when: not e_tar.stat.exists
  register: dl_etcd
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Extract etcd archive
  local_action: unarchive
    src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
    dest={{ local_release_dir }}/etcd copy=no
  when: dl_etcd|changed
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Pick up only etcd binaries
  local_action: copy
    src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/{{ item }}
    dest={{ local_release_dir }}/etcd/bin
  with_items:
    - etcdctl
    - etcd
  when: dl_etcd|changed

- name: Delete unused etcd files
  local_action: file
    path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64 state=absent
  when: dl_etcd|changed

View File

@@ -1,39 +0,0 @@
---
- name: Create flannel release directory
  local_action: file
    path={{ local_release_dir }}/flannel
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if flannel release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
  register: f_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download flannel
  local_action: shell
    curl -o {{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz -Ls {{ flannel_download_url }}/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz
  when: not f_tar.stat.exists
  register: dl_flannel
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Extract flannel archive
  local_action: unarchive
    src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
    dest={{ local_release_dir }}/flannel copy=no
  when: dl_flannel|changed
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Pick up only flannel binaries
  local_action: copy
    src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}/flanneld
    dest={{ local_release_dir }}/flannel/bin
  when: dl_flannel|changed

- name: Delete unused flannel files
  local_action: file
    path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }} state=absent
  when: dl_flannel|changed

View File

@@ -1,47 +0,0 @@
---
- name: Create kubernetes release directory
  local_action: file
    path={{ local_release_dir }}/kubernetes
    state=directory

- name: Check if kubernetes release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
  register: k_tar

# issues with get_url module and redirects, to be tested again in the near future
- name: Download kubernetes
  local_action: shell
    curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
  when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
  register: dl_kube

- name: Compare kubernetes archive checksum
  local_action: stat
    path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
  register: k_tar
  failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
  when: dl_kube|changed

- name: Extract kubernetes archive
  local_action: unarchive
    src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
    dest={{ local_release_dir }}/kubernetes copy=no
  when: dl_kube|changed

- name: Extract kubernetes binaries archive
  local_action: unarchive
    src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
    dest={{ local_release_dir }}/kubernetes copy=no
  when: dl_kube|changed

- name: Pick up only kubernetes binaries
  local_action: synchronize
    src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
    dest={{ local_release_dir }}/kubernetes
  when: dl_kube|changed

- name: Delete unused kubernetes files
  local_action: file
    path={{ local_release_dir }}/kubernetes/kubernetes state=absent
  when: dl_kube|changed

View File

@@ -1,5 +1,19 @@
---
- include: kubernetes.yml
- include: etcd.yml
- include: calico.yml
- include: flannel.yml
- name: Create dest directories
  file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
  with_items: downloads

- name: Download items
  get_url:
    url: "{{item.url}}"
    dest: "{{local_release_dir}}/{{item.dest}}"
    sha256sum: "{{item.sha256 | default(omit)}}"
  with_items: downloads

- name: Extract archives
  unarchive:
    src: "{{ local_release_dir }}/{{item.dest}}"
    dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
    copy: no
  when: "{{item.unarchive is defined and item.unarchive == True}}"
  with_items: downloads

View File

@@ -1,8 +0,0 @@
---
etcd_version: v2.2.0
flannel_version: 0.5.3
kube_version: v1.0.6
kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c
calico_version: v0.5.1

View File

@@ -1,15 +0,0 @@
---
- name: restart daemons
  command: /bin/true
  notify:
    - reload systemd
    - restart etcd2

- name: reload systemd
  command: systemctl daemon-reload

- name: restart etcd2
  service: name=etcd2 state=restarted

- name: Save iptables rules
  command: service iptables save

View File

@@ -1,15 +0,0 @@
---
- name: Disable ferm
  service: name=ferm state=stopped enabled=no

- name: Create etcd2 environment vars dir
  file: path=/etc/systemd/system/etcd2.service.d state=directory

- name: Write etcd2 config file
  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
  notify:
    - reload systemd
    - restart etcd2

- name: Ensure etcd2 is running
  service: name=etcd2 state=started enabled=yes

View File

@@ -1,24 +0,0 @@
---
- name: Create etcd user
  user: name=etcd shell=/bin/nologin home=/var/lib/etcd2

- name: Install etcd binaries
  copy:
    src={{ local_release_dir }}/etcd/bin/{{ item }}
    dest={{ bin_dir }}
    owner=etcd
    mode=u+x
  with_items:
    - etcdctl
    - etcd
  notify:
    - restart daemons

- name: Create etcd2 binary symlink
  file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link

- name: Copy etcd2.service systemd file
  template:
    src: systemd-etcd2.service.j2
    dest: /lib/systemd/system/etcd2.service
  notify: restart daemons

View File

@@ -1,3 +1,13 @@
---
- include: install.yml
- include: configure.yml
- name: ETCD2 | Stop etcd2 service
  service: name=etcd state=stopped
  ignore_errors: yes

- name: ETCD2 | create etcd pod template
  template: src=etcd-pod.yml dest=/etc/kubernetes/manifests/etcd-pod.manifest

- name: ETCD2 | Check for etcd2 port
  wait_for:
    port: 2379
    delay: 5
    timeout: 100

View File

@@ -0,0 +1,54 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: etcd
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: etcd
    image: quay.io/coreos/etcd:v2.2.2
    resources:
      limits:
        cpu: 100m
        memory: 256M
    args:
{% if inventory_hostname in groups['etcd'] %}
    - --name
    - etcd-{{inventory_hostname}}-master
    - --advertise-client-urls
    - "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
    - --listen-peer-urls
    - http://0.0.0.0:2380
    - --initial-advertise-peer-urls
    - http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380
    - --data-dir
    - /var/etcd/data
    - --initial-cluster-state
    - new
{% else %}
    - --proxy
    - 'on'
{% endif %}
    - --listen-client-urls
    - "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"
    - --initial-cluster
    - "{% for host in groups['etcd'] %}etcd-{{host}}-master=http://{{ hostvars[host]['ip'] | default( hostvars[host]['ansible_default_ipv4']['address']) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
    - --initial-cluster-token
    - etcd-k8s-cluster
    ports:
    - name: etcd-client
      containerPort: 2379
      hostPort: 2379
    - name: etcd-peer
      containerPort: 2380
      hostPort: 2380
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
      readOnly: false
  volumes:
  - name: varetcd
    hostPath:
      path: /containers/pods/etcd-{{inventory_hostname}}/rootfs/var/etcd

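The wait_for task in the role above only proves the client port is open; etcd 2.x also exposes a /health endpoint. A hedged follow-up check could look like this (task name and retry values are illustrative, not part of this changeset):

- name: ETCD2 | Check etcd is actually healthy (sketch)
  uri:
    url: "http://127.0.0.1:2379/health"
    return_content: yes
  register: etcd_health
  # etcd 2.x returns {"health": "true"} once the member or proxy is serving
  until: "'true' in etcd_health.content"
  retries: 5
  delay: 5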
View File

@@ -1,17 +0,0 @@
# etcd2.0
[Service]
{% if inventory_hostname in groups['kube-master'] %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER_STATE=new"
Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
Environment="ETCD_NAME=master"
{% else %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_PROXY=on"
{% endif %}

View File

@@ -1,15 +0,0 @@
[Unit]
Description=etcd2
Conflicts=etcd.service
[Service]
User=etcd
Environment=ETCD_DATA_DIR=/var/lib/etcd2
Environment=ETCD_NAME=%m
ExecStart={{ bin_dir }}/etcd2
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target

View File

@@ -1,115 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Caller should set in the env:
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
# MASTER_NAME - I'm not sure what it is...
# Also the following will be respected
# CERT_DIR - where to place the finished certs
# CERT_GROUP - who the group owner of the cert files should be
cert_ip="${MASTER_IP:="${1}"}"
master_name="${MASTER_NAME:="kubernetes"}"
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
dns_domain="${DNS_DOMAIN:="cluster.local"}"
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
cert_group="${CERT_GROUP:="kube-cert"}"
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
# Calculate the first ip address in the service range
octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octects[3]+=1))
service_ip=$(echo "${octects[*]}" | sed 's/ /./g')
# Determine appropriate subject alt names
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null
cd easy-rsa-master/easyrsa3
(./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
# If there was an error in the subshell, just die.
# TODO(roberthbailey): add better error handling here
echo "=== Failed to generate certificates: Aborting ==="
exit 2
}
mkdir -p "$cert_dir"
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
for cert in "${CERTS[@]}"; do
chgrp "${cert_group}" "${cert_dir}/${cert}"
chmod 660 "${cert_dir}/${cert}"
done

View File

@@ -1,3 +0,0 @@
---
dependencies:
  - { role: etcd }

View File

@@ -1,42 +0,0 @@
---
#- name: Get create ca cert script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
#    dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
#    force=yes

- name: certs | install cert generation script
  copy:
    src=make-ca-cert.sh
    dest={{ kube_script_dir }}
    mode=0500
  changed_when: false

# FIXME This only generates a cert for one master...
- name: certs | run cert generation script
  command:
    "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
  args:
    creates: "{{ kube_cert_dir }}/server.crt"
  environment:
    MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
    MASTER_NAME: "{{ inventory_hostname }}"
    DNS_DOMAIN: "{{ dns_domain }}"
    SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
    CERT_DIR: "{{ kube_cert_dir }}"
    CERT_GROUP: "{{ kube_cert_group }}"

- name: certs | check certificate permissions
  file:
    path={{ item }}
    group={{ kube_cert_group }}
    owner=kube
    mode=0440
  with_items:
    - "{{ kube_cert_dir }}/ca.crt"
    - "{{ kube_cert_dir }}/server.crt"
    - "{{ kube_cert_dir }}/server.key"
    - "{{ kube_cert_dir }}/kubecfg.crt"
    - "{{ kube_cert_dir }}/kubecfg.key"
    - "{{ kube_cert_dir }}/kubelet.crt"
    - "{{ kube_cert_dir }}/kubelet.key"

View File

@@ -1,30 +0,0 @@
---
- name: tokens | copy the token gen script
  copy:
    src=kube-gen-token.sh
    dest={{ kube_script_dir }}
    mode=u+x

- name: tokens | generate tokens for master components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
    - "{{ groups['kube-master'][0] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

- name: tokens | generate tokens for node components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ 'system:kubelet', 'system:proxy' ]
    - "{{ groups['kube-node'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

View File

@@ -1,29 +0,0 @@
---
- name: define alias command for kubectl all
  lineinfile:
    dest=/etc/bash.bashrc
    line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
    regexp='^alias kball=.*$'
    state=present
    insertafter=EOF
    create=True

- name: create kubernetes config directory
  file: path={{ kube_config_dir }} state=directory

- name: create kubernetes script directory
  file: path={{ kube_script_dir }} state=directory

- name: Make sure manifest directory exists
  file: path={{ kube_manifest_dir }} state=directory

- name: write the global config file
  template:
    src: config.j2
    dest: "{{ kube_config_dir }}/config"
  notify:
    - restart daemons

- include: secrets.yml
  tags:
    - secrets

View File

@@ -1,50 +0,0 @@
---
- name: certs | create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- name: certs | make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: tokens | make sure the tokens directory exists
  file:
    path={{ kube_token_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- include: gen_certs.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

- name: Read back the CA certificate
  slurp:
    src: "{{ kube_cert_dir }}/ca.crt"
  register: ca_cert
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: certs | register the CA certificate as a fact for later use
  set_fact:
    kube_ca_cert: "{{ ca_cert.content|b64decode }}"

- name: certs | write CA certificate everywhere
  copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
  notify:
    - restart daemons

- include: gen_tokens.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

View File

@@ -1,26 +0,0 @@
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# Comma separated list of nodes in the etcd cluster
# KUBE_ETCD_SERVERS="--etcd_servers="
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=5"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
# How the replication controller, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"

File diff suppressed because it is too large

View File

@@ -1,32 +1,14 @@
---
- name: restart daemons
  command: /bin/true
  notify:
    - reload systemd
    - restart apiserver
    - restart controller-manager
    - restart scheduler
    - restart proxy

- name: reload systemd
  command: systemctl daemon-reload

- name: restart apiserver
  service:
    name: kube-apiserver
    state: restarted

- name: restart systemd-kubelet
  command: /bin/true
  notify:
    - reload systemd
    - restart kubelet

- name: restart controller-manager
- name: restart kubelet
  service:
    name: kube-controller-manager
    state: restarted

- name: restart scheduler
  service:
    name: kube-scheduler
    state: restarted

- name: restart proxy
  service:
    name: kube-proxy
    name: kubelet
    state: restarted

View File

@@ -1,3 +1,4 @@
---
dependencies:
  - { role: kubernetes/common }
  - { role: etcd }
  - { role: kubernetes/node }

View File

@@ -1,87 +0,0 @@
---
- name: get the node token values from token files
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:controller_manager"
    - "system:scheduler"
    - "system:kubectl"
    - "system:proxy"
  register: tokens
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Set token facts
  set_fact:
    controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
    scheduler_token: "{{ tokens.results[1].content|b64decode }}"
    kubectl_token: "{{ tokens.results[2].content|b64decode }}"
    proxy_token: "{{ tokens.results[3].content|b64decode }}"

- name: write the config files for api server
  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
  notify:
    - restart daemons

- name: write config file for controller-manager
  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
  notify:
    - restart controller-manager

- name: write the kubecfg (auth) file for controller-manager
  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
  notify:
    - restart controller-manager

- name: write the config file for scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for scheduler
  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for kubectl
  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig

- name: write the config files for proxy
  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
  notify:
    - restart daemons

- name: write the kubecfg (auth) file for proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig

- name: populate users for basic auth in API
  lineinfile:
    dest: "{{ kube_users_dir }}/known_users.csv"
    create: yes
    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
  with_dict: "{{ kube_users }}"
  notify:
    - restart apiserver

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started

- name: Enable controller-manager
  service:
    name: kube-controller-manager
    enabled: yes
    state: started

- name: Enable scheduler
  service:
    name: kube-scheduler
    enabled: yes
    state: started

- name: Enable kube-proxy
  service:
    name: kube-proxy
    enabled: yes
    state: started

View File

@@ -1,34 +0,0 @@
---
- name: Write kube-apiserver systemd init file
  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
  notify: restart daemons

- name: Write kube-controller-manager systemd init file
  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
  notify: restart daemons

- name: Write kube-scheduler systemd init file
  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
  notify: restart daemons

- name: Write kube-proxy systemd init file
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
  notify: restart daemons

- name: Install kubernetes binaries
  copy:
    src={{ local_release_dir }}/kubernetes/bin/{{ item }}
    dest={{ bin_dir }}
    owner=kube
    mode=u+x
  with_items:
    - kube-apiserver
    - kube-controller-manager
    - kube-scheduler
    - kube-proxy
    - kubectl
  notify:
    - restart daemons

- name: Allow apiserver to bind on both secure and insecure ports
  shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver

View File

@@ -1,3 +1,82 @@
---
- include: install.yml
- include: config.yml
- name: Copy kubectl bash completion
  copy:
    src: kubectl_bash_completion.sh
    dest: /etc/bash_completion.d/kubectl.sh

- name: Install kubectl binary
  synchronize:
    src: "{{ local_release_dir }}/kubernetes/bin/kubectl"
    dest: "{{ bin_dir }}/kubectl"
    archive: no
    checksum: yes
    times: yes
  delegate_to: "{{ groups['downloader'][0] }}"

- name: Perms kubectl binary
  file: path={{ bin_dir }}/kubectl owner=kube mode=0755 state=file

- name: populate users for basic auth in API
  lineinfile:
    dest: "{{ kube_users_dir }}/known_users.csv"
    create: yes
    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
    backup: yes
  with_dict: "{{ kube_users }}"

# Sync masters
- name: synchronize auth directories for masters
  synchronize:
    src: "{{ item }}"
    dest: "{{ kube_config_dir }}"
    recursive: yes
    delete: yes
    rsync_opts: [ '--one-file-system']
    set_remote_user: false
  with_items:
    - "{{ kube_token_dir }}"
    - "{{ kube_cert_dir }}"
    - "{{ kube_users_dir }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  when: inventory_hostname != "{{ groups['kube-master'][0] }}"

# Write manifests
- name: Write kube-apiserver manifest
  template:
    src: manifests/kube-apiserver.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
  notify:
    - restart kubelet

- meta: flush_handlers

- name: wait for the apiserver to be running (pulling image and running container)
  wait_for:
    port: "{{kube_apiserver_insecure_port}}"
    delay: 10
    timeout: 60

- name: Create 'kube-system' namespace
  uri:
    url: http://127.0.0.1:{{ kube_apiserver_insecure_port }}/api/v1/namespaces
    method: POST
    body: '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}'
    status_code: 201,409
    body_format: json
  run_once: yes
  when: inventory_hostname == groups['kube-master'][0]

- name: Write kube-controller-manager manifest
  template:
    src: manifests/kube-controller-manager.manifest.j2
    dest: "{{ kube_config_dir }}/kube-controller-manager.manifest"

- name: Write kube-scheduler manifest
  template:
    src: manifests/kube-scheduler.manifest.j2
    dest: "{{ kube_config_dir }}/kube-scheduler.manifest"

- name: Write podmaster manifest
  template:
    src: manifests/kube-podmaster.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest"

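wait_for only checks that the socket is open before the namespace POST. A stricter readiness check against the apiserver's /healthz endpoint on the insecure local port can be sketched like this (task name and retry values are illustrative, not part of this changeset):

- name: wait for apiserver /healthz to report ok (sketch)
  uri:
    url: "http://127.0.0.1:{{ kube_apiserver_insecure_port }}/healthz"
    return_content: yes
  register: apiserver_health
  # the apiserver answers the literal string "ok" on /healthz when ready
  until: apiserver_health.content == "ok"
  retries: 10
  delay: 6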
View File

@@ -1,25 +0,0 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
# KUBELET_PORT="--kubelet_port=10250"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# Add your own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"

View File

@@ -1,6 +0,0 @@
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: controller-manager-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: controller-manager
  name: controller-manager-to-{{ cluster_name }}
users:
- name: controller-manager
  user:
    token: {{ controller_manager_token }}

View File

@@ -4,8 +4,8 @@ current-context: kubectl-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority-data: {{ kube_ca_cert|b64encode }}
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
    certificate-authority-data: {{ kube_node_cert|b64encode }}
    server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
  name: {{ cluster_name }}
contexts:
- context:
View File

@@ -0,0 +1,52 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - apiserver
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
    - --service-cluster-ip-range={{ kube_service_addresses }}
    - --client-ca-file={{ kube_cert_dir }}/ca.pem
    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
    - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
    - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --secure-port={{ kube_apiserver_port }}
    - --insecure-port={{ kube_apiserver_insecure_port }}
{% if kube_api_runtime_config is defined %}
{% for conf in kube_api_runtime_config %}
    - --runtime-config={{ conf }}
{% endfor %}
{% endif %}
    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
    - --v={{ kube_log_level | default('2') }}
    - --allow-privileged=true
    ports:
    - containerPort: {{ kube_apiserver_port }}
      hostPort: {{ kube_apiserver_port }}
      name: https
    - containerPort: {{ kube_apiserver_insecure_port }}
      hostPort: {{ kube_apiserver_insecure_port }}
      name: local
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: kubernetes-config
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
    name: kubernetes-config
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --root-ca-file={{ kube_cert_dir }}/ca.pem
    - --v={{ kube_log_level | default('2') }}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 1
    volumeMounts:
    - mountPath: {{ kube_cert_dir }}
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: {{ kube_cert_dir }}
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

View File

@@ -0,0 +1,46 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-podmaster
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: scheduler-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --key=scheduler
    - --source-file={{ kube_config_dir}}/kube-scheduler.manifest
    - --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: manifest-src
      readOnly: true
    - mountPath: {{ kube_manifest_dir }}
      name: manifest-dst
  - name: controller-manager-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --key=controller
    - --source-file={{ kube_config_dir }}/kube-controller-manager.manifest
    - --dest-file={{ kube_manifest_dir }}/kube-controller-manager.manifest
    terminationMessagePath: /dev/termination-log
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: manifest-src
      readOnly: true
    - mountPath: {{ kube_manifest_dir }}
      name: manifest-dst
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
    name: manifest-src
  - hostPath:
      path: {{ kube_manifest_dir }}
    name: manifest-dst

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
    - --v={{ kube_log_level | default('2') }}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 1

View File

@@ -1,7 +0,0 @@
###
# kubernetes proxy config
# default config should be adequate
# Add your own!
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: proxy-to-{{ cluster_name }}
preferences: {}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: proxy
  name: proxy-to-{{ cluster_name }}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['kube-master'][0] }}:8080
  name: {{ cluster_name }}
users:
- name: proxy
  user:
    token: {{ proxy_token }}

View File

@@ -1,7 +0,0 @@
###
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: scheduler-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: scheduler
  name: scheduler-to-{{ cluster_name }}
users:
- name: scheduler
  user:
    token: {{ scheduler_token }}

View File

@@ -1,28 +0,0 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,20 +0,0 @@
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
ExecStart={{ bin_dir }}/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,21 +0,0 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,20 +0,0 @@
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart={{ bin_dir }}/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -11,11 +11,8 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
# The port the API Server will be listening on.
kube_master_port: 443
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
@@ -33,9 +30,20 @@ kube_cert_group: kube-cert
dns_domain: "{{ cluster_name }}"
kube_proxy_mode: userspace
# Temporary image, waiting for official google release
# hyperkube_image_repo: gcr.io/google_containers/hyperkube
hyperkube_image_repo: quay.io/smana/hyperkube
hyperkube_image_tag: v1.1.3
# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 253rd IP address in the kube_service_addresses range and use that.
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
kube_api_runtime_config:
- extensions/v1beta1/daemonsets=true
- extensions/v1beta1/deployments=true

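To make the ipaddr filter chain concrete: assuming kube_service_addresses were 10.233.0.0/18 (an illustrative value, not taken from this diff), the expression resolves to 10.233.0.253. A throwaway debug task shows the computed value:

- name: show the address the DNS pod would be given (sketch)
  # ipaddr('net') normalizes to the network, ipaddr(253) picks the 253rd
  # host address, ipaddr('address') strips the prefix length
  debug:
    msg: "{{ kube_service_addresses | ipaddr('net') | ipaddr(253) | ipaddr('address') }}"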
View File

@@ -19,7 +19,10 @@ token_file="${token_dir}/known_tokens.csv"
create_accounts=($@)
touch "${token_file}"
if [ ! -e "${token_file}" ]; then
  touch "${token_file}"
fi
for account in "${create_accounts[@]}"; do
  if grep ",${account}," "${token_file}" ; then
    continue

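The added guard makes the touch idempotent, and the grep on ",${account}," assumes each generated line stores the account between commas. Reading the script that way, a generated known_tokens.csv line would plausibly look like this (token value and host name are placeholders):

<random-base64-token>,system:kubelet-node1,system:kubelet-node1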
View File

@@ -0,0 +1,107 @@
#!/bin/bash
# Author: skahlouc@skahlouc-laptop
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
usage()
{
cat << EOF
Create self signed certificates

Usage : $(basename $0) -f <config> [-c <cloud_provider>] [-d <ssldir>] [-g <ssl_group>]
      -h | --help   : Show this message
      -f | --config : Openssl configuration file
      -c | --cloud  : Cloud provider (GCE, AWS or AZURE)
      -d | --ssldir : Directory where the certificates will be installed
      -g | --sslgrp : Group of the certificates

ex :
      $(basename $0) -f openssl.conf -c GCE -d /srv/ssl -g kube
EOF
}

# Options parsing
while (($#)); do
  case "$1" in
    -h | --help)   usage;         exit 0;;
    -f | --config) CONFIG=${2};   shift 2;;
    -c | --cloud)  CLOUD=${2};    shift 2;;
    -d | --ssldir) SSLDIR="${2}"; shift 2;;
    -g | --group)  SSLGRP="${2}"; shift 2;;
    *)
      usage
      echo "ERROR : Unknown option"
      exit 3
      ;;
  esac
done

if [ -z ${CONFIG} ]; then
  echo "ERROR: the openssl configuration file is missing. option -f"
  exit 1
fi
if [ -z ${SSLDIR} ]; then
  SSLDIR="/etc/kubernetes/certs"
fi
if [ -z ${SSLGRP} ]; then
  SSLGRP="kube-cert"
fi

#echo "config=$CONFIG, cloud=$CLOUD, certdir=$SSLDIR, certgroup=$SSLGRP"

SUPPORTED_CLOUDS="GCE AWS AZURE"

# TODO: Add support for discovery on other providers?
if [ "${CLOUD}" == "GCE" ]; then
  CLOUD_IP=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "${CLOUD}" == "AWS" ]; then
  CLOUD_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [ "${CLOUD}" == "AZURE" ]; then
  CLOUD_IP=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi

tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"

mkdir -p "${SSLDIR}"

# Root CA
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1

# Apiserver
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1

# Nodes and Admin
for i in node admin; do
  openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
  openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
  openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
done

# Install certs
mv *.pem ${SSLDIR}/
chgrp ${SSLGRP} ${SSLDIR}/*
chmod 600 ${SSLDIR}/*-key.pem
chown root:root ${SSLDIR}/*-key.pem

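After the script has run, the generated leaf certificates can be sanity-checked against the CA with standard openssl tooling. Wrapped as a hedged Ansible task (the task name is illustrative, not part of this changeset):

- name: certs | verify the apiserver certificate against the generated CA (sketch)
  command: openssl verify -CAfile {{ kube_cert_dir }}/ca.pem {{ kube_cert_dir }}/apiserver.pem
  # read-only check; never report a change
  changed_when: false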
View File

@@ -1,19 +1,14 @@
---
- name: restart daemons
- name: reload systemd
  command: systemctl daemon-reload

- name: restart systemd-kubelet
  command: /bin/true
  notify:
    - reload systemd
    - restart kubelet
    - restart proxy

- name: restart kubelet
  service:
    name: kubelet
    state: restarted

- name: restart proxy
  service:
    name: kube-proxy
    state: restarted

- name: reload systemd
  command: systemctl daemon-reload

View File

@@ -1,3 +0,0 @@
---
dependencies:
  - { role: kubernetes/common }

View File

@@ -1,55 +0,0 @@
---
- name: Get the node token values
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:kubelet"
    - "system:proxy"
  register: tokens
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Set token facts
  set_fact:
    kubelet_token: "{{ tokens.results[0].content|b64decode }}"
    proxy_token: "{{ tokens.results[1].content|b64decode }}"

- name: Create kubelet environment vars dir
  file: path=/etc/systemd/system/kubelet.service.d state=directory

- name: Write kubelet config file
  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
  notify:
    - reload systemd
    - restart kubelet

- name: write the kubecfg (auth) file for kubelet
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
  notify:
    - restart kubelet

- name: Create proxy environment vars dir
  file: path=/etc/systemd/system/kube-proxy.service.d state=directory

- name: Write proxy config file
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
  notify:
    - reload systemd
    - restart proxy

- name: write the kubecfg (auth) file for kube-proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
  notify:
    - restart proxy

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started

- name: Enable proxy
  service:
    name: kube-proxy
    enabled: yes
    state: started

View File

@@ -0,0 +1,28 @@
---
- name: certs | install cert generation script
  copy:
    src=make-ssl.sh
    dest={{ kube_script_dir }}
    mode=0500
  changed_when: false

- name: certs | write openssl config
  template:
    src: "openssl.conf.j2"
    dest: "{{ kube_config_dir }}/.openssl.conf"

- name: certs | run cert generation script
  shell: >
    {{ kube_script_dir }}/make-ssl.sh
    -f {{ kube_config_dir }}/.openssl.conf
    -g {{ kube_cert_group }}
    -d {{ kube_cert_dir }}
  args:
    creates: "{{ kube_cert_dir }}/apiserver.pem"

- name: certs | check certificate permissions
  file:
    path={{ kube_cert_dir }}
    group={{ kube_cert_group }}
    owner=kube
    recurse=yes

View File

@@ -0,0 +1,48 @@
---
- name: tokens | copy the token gen script
  copy:
    src=kube-gen-token.sh
    dest={{ kube_script_dir }}
    mode=u+x
  when: inventory_hostname == groups['kube-master'][0]

- name: tokens | generate tokens for master components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:kubectl" ]
    - "{{ groups['kube-master'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  when: inventory_hostname == groups['kube-master'][0]

- name: tokens | generate tokens for node components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ 'system:kubelet' ]
    - "{{ groups['kube-node'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  when: inventory_hostname == groups['kube-master'][0]

- name: tokens | generate tokens for calico
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:calico" ]
    - "{{ groups['k8s-cluster'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  when: kube_network_plugin == "calico"
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: tokens | get the calico token values
  slurp:
    src: "{{ kube_token_dir }}/system:calico-{{ inventory_hostname }}.token"
  register: calico_token
  when: kube_network_plugin == "calico"
  delegate_to: "{{ groups['kube-master'][0] }}"

View File

@@ -1,20 +1,48 @@
---
- name: Write kube-proxy systemd init file
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
  notify: restart daemons

- debug: msg="{{ init_system == 'systemd' }}"
- debug: msg="{{ init_system }}"

- name: Write kubelet systemd init file
  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
  notify: restart daemons

- name: install | Write kubelet systemd init file
  template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
  when: init_system == "systemd"
  notify: restart systemd-kubelet

- name: Install kubernetes binaries
  copy:
    src={{ local_release_dir }}/kubernetes/bin/{{ item }}
    dest={{ bin_dir }}
    owner=kube
    mode=u+x
  with_items:
    - kube-proxy
    - kubelet

- name: install | Write kubelet initd script
  template: src=deb-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=755 backup=yes
  when: init_system == "sysvinit" and ansible_os_family == "Debian"
  notify: restart kubelet

- name: install | Write kubelet initd script
  template: src=rh-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=755 backup=yes
  when: init_system == "sysvinit" and ansible_os_family == "RedHat"
  notify: restart kubelet

- name: install | Install kubelet binary
  synchronize:
    src: "{{ local_release_dir }}/kubernetes/bin/kubelet"
    dest: "{{ bin_dir }}/kubelet"
    times: yes
    archive: no
  delegate_to: "{{ groups['downloader'][0] }}"
  notify:
    - restart daemons
    - restart kubelet

- name: install | Perms kubelet binary
  file: path={{ bin_dir }}/kubelet owner=kube mode=0755 state=file

- name: install | Calico-plugin | Directory
  file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/ state=directory
  when: kube_network_plugin == "calico"

- name: install | Calico-plugin | Binary
  synchronize:
    src: "{{ local_release_dir }}/calico/bin/calico"
    dest: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico"
    times: yes
    archive: no
  delegate_to: "{{ groups['downloader'][0] }}"
  when: kube_network_plugin == "calico"
  notify: restart kubelet

- name: install | Perms calico plugin binary
  file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico owner=kube mode=0755 state=file

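Several of the tasks above branch on an init_system fact that is set during preinstall. A minimal sketch of how such a fact can be derived, assuming PID 1's command name is the signal (the task names and logic here are illustrative, not this repository's exact detection code):

- name: check which init system is running (sketch)
  command: cat /proc/1/comm
  register: init_comm
  # detection should never report a change
  changed_when: false

- name: set the init_system fact (sketch)
  set_fact:
    init_system: "{{ 'systemd' if init_comm.stdout == 'systemd' else 'sysvinit' }}"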
View File

@@ -1,4 +1,49 @@
---
- name: create kubernetes config directory
  file: path={{ kube_config_dir }} state=directory

- name: create kubernetes script directory
  file: path={{ kube_script_dir }} state=directory

- name: Make sure manifest directory exists
  file: path={{ kube_manifest_dir }} state=directory

- name: certs | create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- include: secrets.yml
  tags:
    - secrets

- include: install.yml
- include: config.yml
- include: temp_workaround.yml

- name: Write kubelet config file
  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet backup=yes
  notify:
    - restart kubelet

- name: write the kubecfg (auth) file for kubelet
  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
  notify:
    - restart kubelet

- name: Write proxy manifest
  template:
    src: manifests/kube-proxy.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started

View File

@@ -0,0 +1,52 @@
---
- name: certs | make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: tokens | make sure the tokens directory exists
  file:
    path={{ kube_token_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- include: gen_certs.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

- include: gen_tokens.yml

# Sync certs between nodes
- user:
    name: '{{ansible_user_id}}'
    generate_ssh_key: yes
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: yes

- name: 'get ssh keypair'
  slurp: path=~/.ssh/id_rsa.pub
  register: public_key
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: 'setup keypair on nodes'
  authorized_key:
    user: '{{ansible_user_id}}'
    key: "{{public_key.content|b64decode }}"

- name: synchronize certificates for nodes
  synchronize:
    src: "{{ item }}"
    dest: "{{ kube_cert_dir }}"
    recursive: yes
    delete: yes
    rsync_opts: [ '--one-file-system']
    set_remote_user: false
  with_items:
    - "{{ kube_cert_dir}}/ca.pem"
    - "{{ kube_cert_dir}}/node.pem"
    - "{{ kube_cert_dir}}/node-key.pem"
  delegate_to: "{{ groups['kube-master'][0] }}"
  when: inventory_hostname not in "{{ groups['kube-master'] }}"

Some files were not shown because too many files have changed in this diff.