Compare commits

...

207 Commits
v1.1 ... 1.4.0

Author SHA1 Message Date
Antoine Legrand
cf472a6b4c Merge pull request #107 from ansibl8s/race_condition_api_master
Slowdown apimaster restart
2016-01-26 18:00:47 +01:00
ant31
fd6ac61afc Use local etcd/etcdproxy for calico 2016-01-26 17:28:30 +01:00
Antoine Legrand
16a1926f94 Drop ansible 1.9 support 2016-01-26 15:31:11 +01:00
Antoine Legrand
839974bad0 Merge pull request #108 from ansibl8s/set_perms_on_unarchive
Set perms on unarchive
2016-01-26 15:25:28 +01:00
Antoine Legrand
4566d60e6f Slowdown apimaster restart 2016-01-26 15:23:16 +01:00
Antoine Legrand
49a7278563 Set perms on unarchive 2016-01-26 12:17:33 +01:00
Antoine Legrand
8676f8761f Merge pull request #99 from ansibl8s/symlink-dnsmasq-conf
Symlink dnsmasq conf
2016-01-26 00:44:13 +01:00
Antoine Legrand
b9781fa7c2 Symlink dnsmasq conf 2016-01-26 00:30:29 +01:00
Antoine Legrand
08052f60da Merge pull request #105 from ansibl8s/fix_handlers
fix some handlers
2016-01-25 23:00:07 +01:00
Antoine Legrand
44230a4e86 Merge pull request #101 from galthaus/patch-1
Fix download reference in cluster.ml
2016-01-25 22:56:48 +01:00
Smaine Kahlouch
90ffb8489a fix some handlers 2016-01-25 22:49:24 +01:00
Antoine Legrand
238f6e8a0b Remove apt-get update 2016-01-25 22:46:20 +01:00
Antoine Legrand
ef7cf3bf11 Merge pull request #103 from rackn/perm-fix
Force owner and permissions for get_url retrieved files.
2016-01-25 22:01:57 +01:00
Greg Althaus
e7d5b7af67 Force owner and permissions for get_url retrieved
files.  get_url doesn't honor owner and mode is spotty.
2016-01-25 13:30:48 -06:00
Smaine Kahlouch
359e55f6e4 Merge pull request #100 from rackn/cache-update-centos
Need to use separate stanzas for each repo because the
2016-01-25 19:00:57 +01:00
Greg Althaus
dd29c8064f Fix download reference in cluster.ml 2016-01-25 11:23:55 -06:00
Greg Althaus
c7bd2a2a1e Need to use separate stanzas for each repo because the
args are different.  Sigh.
2016-01-25 11:16:56 -06:00
Smaine Kahlouch
87fa167efa Merge pull request #95 from Smana/fix_ha_apiserver
workaround_ha_apiserver
2016-01-25 13:01:03 +01:00
Smaine Kahlouch
baaa6efc2b workaround_ha_apiserver 2016-01-25 12:07:32 +01:00
Antoine Legrand
cece179bd4 Merge pull request #97 from ansibl8s/systemd_reload
Fix systemd reload and calico unit
2016-01-25 11:01:11 +01:00
ant31
56b92812fa Fix systemd reload and calico unit 2016-01-25 10:54:07 +01:00
Antoine Legrand
2cbbcee351 Merge pull request #98 from ansibl8s/setup_cache
Use update_cache when possible
2016-01-25 02:12:19 +01:00
ant31
f5508b1794 Use update_cache when possible 2016-01-25 02:06:34 +01:00
Antoine Legrand
8f7d552401 Merge pull request #93 from ansibl8s/flannel-info
Add flannel vars to enable vagrant and amazon environments
2016-01-24 16:46:04 +01:00
Greg Althaus
bcd6ecb7fb Add flannel vars to enable vagrant and amazon environments 2016-01-24 16:18:35 +01:00
Antoine Legrand
65666fc28a Merge pull request #90 from ansibl8s/add_users_role
AddUser Role
2016-01-24 16:17:24 +01:00
Antoine Legrand
b4734c280a Merge branch 'master' into add_users_role 2016-01-24 15:58:10 +01:00
Antoine Legrand
dd61f685b8 AddUser Role 2016-01-24 11:54:34 +01:00
Antoine Legrand
641ce3358a Merge pull request #91 from Smana/rsync_instead_of_copy
use rsync instead of cp
2016-01-23 23:38:42 +01:00
Smaine Kahlouch
4984b57aa2 use rsync instead of command 2016-01-23 18:26:07 +01:00
Smaine Kahlouch
87d8d87c6e Merge pull request #87 from Smana/apiserver_on_host
Apiserver on host
2016-01-23 17:46:23 +01:00
Smaine Kahlouch
283c4169ac run apiserver as a service
reorder master handlers

typo for sysvinit
2016-01-23 14:21:04 +01:00
Smaine Kahlouch
d5f11b2442 Merge pull request #88 from Smana/complete_remove_downloader
missing commits for the PR #86
2016-01-22 17:25:12 +01:00
Smaine Kahlouch
5edc81c627 moving kube-cert group into group_vars 2016-01-22 17:18:45 +01:00
Smaine Kahlouch
391413f7e7 missing commits for the PR #86 2016-01-22 17:10:31 +01:00
Smaine Kahlouch
c05c60a5d2 Merge pull request #86 from ansibl8s/remove_downloader
Remove downloader host
2016-01-22 17:04:54 +01:00
Smaine Kahlouch
87b42e34e0 create kube-cert group task 2016-01-22 16:51:54 +01:00
Smaine Kahlouch
be0bec9eab add kube-cert group 2016-01-22 16:46:06 +01:00
Smaine Kahlouch
cb59559835 use command instead of synchronize 2016-01-22 16:37:07 +01:00
Antoine Legrand
078b67c50f Remove downloader host 2016-01-22 09:59:39 +01:00
Antoine Legrand
e95c4739f5 Merge pull request #82 from rackn/etcd-sync
Fix etcd synchronize to other nodes from the downloader
2016-01-21 20:39:52 +01:00
Greg Althaus
32877bdc7b Merge branch 'master' into etcd-sync 2016-01-21 13:13:58 -06:00
Antoine Legrand
5e3af86c26 Merge pull request #84 from rackn/init-system-fix
Test for a systemd service that should be up.
2016-01-21 20:07:47 +01:00
Greg Althaus
ec1073def8 Test for a systemd service that should be up. 2016-01-21 11:35:15 -06:00
Greg Althaus
28e530e005 Fix etcd synchronize to other nodes from the downloader 2016-01-21 11:21:25 -06:00
Smaine Kahlouch
9e9aba4e3a Merge pull request #79 from Smana/gitinfo
script which gives info about the deployment state
2016-01-21 13:49:11 +01:00
Smaine Kahlouch
de038530ef don't run gitinfos by default 2016-01-21 13:41:01 +01:00
Smaine Kahlouch
337977e868 script which gives info about the deployment state
fix script location
2016-01-21 13:41:01 +01:00
Smaine Kahlouch
1c2bdbacb1 Merge pull request #72 from Smana/etcd_on_host
etcd directly in host
2016-01-21 13:20:05 +01:00
Smaine Kahlouch
9715962356 etcd directly in host
fix etcd configuration for nodes

fix wrong calico checksums

using a var name etcd_bin_dir

fix etcd handlers for sysvinit

using a var name etcd_bin_dir

sysvinit script

review etcd configuration
2016-01-21 11:36:11 +01:00
Smaine Kahlouch
5afbe181ce Merge pull request #78 from Smana/conf_etc-hosts_preinstall
move /etc/hosts configuration in 'preinstall' role
2016-01-20 19:02:03 +01:00
Smaine Kahlouch
a5094f2a6a move /etc/hosts configuration in 'preinstall' role 2016-01-20 17:37:23 +01:00
Antoine Legrand
9156d1ecfd Merge pull request #76 from rackn/dns-ip
Use IP if specified, otherwise use the ansible discovered address.
2016-01-20 15:46:27 +01:00
Greg Althaus
fe5ec398bf Use IP if specified, otherwise use the ansible discovered address.
This fixes cases for use in Vagrant environments.
2016-01-20 08:34:39 -06:00
Antoine Legrand
babf42f03a Merge pull request #71 from ansibl8s/add_set_remote_user
Add set remote user
2016-01-19 22:20:31 +01:00
Antoine Legrand
859f6322a0 Merge branch 'master' into add_set_remote_user 2016-01-19 21:08:52 +01:00
Smaine Kahlouch
815c5fa43c Merge pull request #74 from rackn/master
run_once only works if master[0] is first in inventory list of all nodes
2016-01-19 20:48:42 +01:00
Greg Althaus
10b2466d82 run_once only works if master[0] is first in inventory list
of all nodes.
2016-01-19 13:10:54 -06:00
Antoine Legrand
f68d8f3757 Add seT_remote_user in synchronize 2016-01-19 14:20:05 +01:00
Antoine Legrand
9b083b62cf Rename tasks 2016-01-19 14:20:05 +01:00
Smaine Kahlouch
59614fc60d Merge pull request #70 from Smana/localhost_dnsmasq
Localhost dnsmasq
2016-01-19 14:01:05 +01:00
Smaine Kahlouch
b54af6b42f reduce dns timeout 2016-01-19 13:49:33 +01:00
Smaine Kahlouch
7cab7e5fef restarting kubelet is sometimes required after docker restart 2016-01-19 13:47:07 +01:00
Smaine Kahlouch
4c5735cef8 configure dnsmasq to listen on localhost only 2016-01-19 13:34:30 +01:00
Smaine Kahlouch
58e1db6aae update kubedns submodule 2016-01-19 13:32:53 +01:00
Smaine Kahlouch
63ae6ba5b5 dnsmasq runs on all nodes 2016-01-19 10:31:47 +01:00
Smaine Kahlouch
f58b4d3dd6 dnsmasq listens on localhost 2016-01-19 10:29:33 +01:00
Smaine Kahlouch
d3a8584212 add timeout options to resolv.conf 2016-01-19 10:18:53 +01:00
Antoine Legrand
51f1ae1e9e Merge pull request #67 from ansibl8s/v1.1.4
Change hyperkube repo
2016-01-18 17:32:05 +01:00
ant31
4271126bae Change hyperkube repo 2016-01-18 17:17:08 +01:00
Smaine Kahlouch
049f5015c1 upgrade hyperkube image version 2016-01-18 16:55:57 +01:00
Smaine Kahlouch
6ab671c88b update memcached submodule 2016-01-18 16:25:01 +01:00
Smaine Kahlouch
d73ac90acf update k8s-pgbouncer submodule 2016-01-18 11:58:12 +01:00
Smaine Kahlouch
adf6e2f7b1 update postgres submodule 2016-01-18 11:44:33 +01:00
Smaine Kahlouch
fb0803cf4c README : update versions 2016-01-17 21:31:38 +01:00
Smaine Kahlouch
806834a6e9 upgrade kubernetes to 1.1.4 and calico to 0.14.0 2016-01-17 21:30:11 +01:00
Smaine Kahlouch
8415634016 use google hyperkube image 2016-01-16 22:55:49 +01:00
Antoine Legrand
319f687ced Merge pull request #62 from ansibl8s/flannel
Flannel running as pod
2016-01-15 13:13:56 +01:00
Smaine Kahlouch
8127e8f8e8 Flannel running as pod 2016-01-15 13:03:27 +01:00
Smaine Kahlouch
dd46cc64a4 README : Networking title 2016-01-15 12:18:26 +01:00
Smaine Kahlouch
2d5862a94d README : typo 2016-01-15 12:18:21 +01:00
Smaine Kahlouch
3d45a81006 README: ansible basics docs link 2016-01-15 12:18:13 +01:00
Smaine Kahlouch
51a0996087 fix regexp for resolv.conf 2016-01-15 12:18:03 +01:00
Smaine Kahlouch
80ac2ec6fc update README 2016-01-15 12:17:28 +01:00
ant31
5d61b5e813 Fix namespace 2016-01-14 16:22:37 +01:00
ant31
b769636435 Ansible 2.0 2016-01-13 16:40:24 +01:00
Smaine Kahlouch
af8f394714 update README, local connection 2016-01-08 16:04:17 +01:00
Smaine Kahlouch
eab2cec0ad fix kubectl perms 2016-01-08 16:02:40 +01:00
Smaine Kahlouch
0b17a4c00f Merge pull request #45 from jcsirot/fix-calico-systemd
Fix calico with systemd
2016-01-08 11:34:58 +01:00
ant31
f49aa90bf7 fix synchronize pull mode 2016-01-08 11:32:06 +01:00
Jean-Christophe Sirot
6f9148e994 Fix calico with systemd 2016-01-08 10:32:43 +01:00
Antoine Legrand
7c8e9dbe00 Update README.md 2016-01-08 00:36:06 +01:00
Antoine Legrand
df3d0bcc21 Build status 2016-01-08 00:20:31 +01:00
Antoine Legrand
7913d62749 Merge pull request #44 from ansibl8s/travis
Travis  tests
2016-01-07 23:46:02 +01:00
Smaine Kahlouch
d5320961e9 enforce user root when sudo is used 2016-01-05 15:33:23 +01:00
ant31
9c461e1018 Use inline update for resolv.conf 2016-01-05 12:31:49 +01:00
ant31
9a03249446 Add travis tests 2016-01-05 12:31:49 +01:00
Smaine Kahlouch
4e015dd3f1 add requirement python-netaddr 2016-01-04 17:06:26 +01:00
Smaine Kahlouch
6f53269ce3 Merge pull request #40 from ansibl8s/common
Common
2016-01-04 17:01:08 +01:00
Smaine Kahlouch
e356b2de4f Updated README 2016-01-04 17:00:40 +01:00
ant31
8fa0110e28 Remove local dep. downloader 2016-01-04 16:10:29 +01:00
Smaine Kahlouch
2a08f7bc0a inventory example, localhost for downloader 2016-01-04 14:54:30 +01:00
Smaine Kahlouch
99d16913d3 use bin_dir var in init scripts 2016-01-04 14:35:01 +01:00
Smaine Kahlouch
d172457504 sysvinit scripts 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
6103d673b7 New calico's configuration 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
29bf90a858 review handlers for sysvinit 2016-01-04 14:30:37 +01:00
Smaine Kahlouch
2c35e4c055 Merge pull request #41 from ansibl8s/download_role
Rework download role
2015-12-31 16:22:07 +01:00
ant31
e3cdb3574a Rework download role 2015-12-31 16:12:16 +01:00
Smaine Kahlouch
15cd1bfc56 rename env file 2015-12-31 14:55:06 +01:00
Smaine Kahlouch
392570f4ff distinct local hostname 2015-12-31 14:54:51 +01:00
Smaine Kahlouch
be5fe9af54 never report changed for init system detection 2015-12-31 14:54:15 +01:00
Smaine Kahlouch
7006d56ab8 split role download and preinstall 2015-12-31 14:07:02 +01:00
Smaine Kahlouch
1695682d85 handle sysvinit 2015-12-31 14:05:55 +01:00
Smaine Kahlouch
1d1d8b9c28 add nodnsupdate hook for RedHat 2015-12-31 14:04:08 +01:00
Smaine Kahlouch
98fe2c02b2 review local tasks 2015-12-31 10:28:47 +01:00
Smaine Kahlouch
92c2a9457e rename role common to kubernetes/preinstall 2015-12-31 10:03:22 +01:00
Smaine Kahlouch
a11e0cb3d1 keep host downloader 2015-12-31 09:38:55 +01:00
Smaine Kahlouch
dbb6f4934e common role in order to support other linux distribs 2015-12-30 22:26:45 +01:00
Smaine Kahlouch
9f07f2a951 install docker on a largest number of linux distribution (based on https://github.com/marklee77/ansible-role-docker) 2015-12-30 22:26:45 +01:00
Smaine Kahlouch
005ddedb94 Merge pull request #38 from ansibl8s/dockerize_dnsmasq
[WIP] Docker dnsmasq
2015-12-30 14:04:17 +01:00
Smaine Kahlouch
b72e220126 remove carriage return 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
e0f460d9b5 copy template dnsmasq pod and remove handlers 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
2bd6b83656 increase etcd timeout value again 2015-12-30 14:02:22 +01:00
ant31
2df70d6a3d Docker dnsmasq 2015-12-30 14:02:22 +01:00
Smaine Kahlouch
ddaeb2b8fa Merge pull request #35 from ansibl8s/dockerize_etcd
Dockerize etcd
2015-12-30 14:01:00 +01:00
Smaine Kahlouch
6f4f170a88 remove useless etcd download, runs into docker containers 2015-12-30 09:50:02 +01:00
Smaine Kahlouch
3f3b03bc99 increase timeout value for etcd wait_for 2015-12-29 21:37:17 +01:00
Smaine Kahlouch
c9d9ccf025 move network-environment template into node role, required by kubelet 2015-12-29 21:36:51 +01:00
ant31
e378f4fb14 Install calico-plugin before running calico 2015-12-28 22:04:39 +01:00
Antoine Legrand
5c15d14f12 Run etcd as pod 2015-12-28 22:04:39 +01:00
Antoine Legrand
b45747ec86 Merge pull request #37 from ansibl8s/apiserver_https
Apiserver https
2015-12-28 13:00:46 +01:00
ant31
d597f707f1 use backup file 2015-12-24 19:23:21 +01:00
Smaine Kahlouch
4388cab8d6 Use second ip address in order to avoid any ip range problem 2015-12-24 13:58:04 +01:00
Smaine Kahlouch
595e93e6da Peer with router configuration is made on the first etcd node 2015-12-24 13:56:53 +01:00
Smaine Kahlouch
5f4e01cec5 new version of logstash submodule 2015-12-22 16:38:40 +01:00
Smaine Kahlouch
7c9c609ac4 calico uses loadbalancer address for apiserver 2015-12-22 08:45:14 +01:00
Smaine Kahlouch
680864f95c don't sync certs on masters, already done in another task 2015-12-21 14:24:57 +01:00
Smaine Kahlouch
7315d33e3c use ip for etcd proxies even when hostnames are used in the inventory 2015-12-21 14:24:10 +01:00
Smaine Kahlouch
b2afbfd4fb don't touch if the file exists 2015-12-21 14:23:33 +01:00
Smaine Kahlouch
ab694ee291 Install python-httplib2 required packaged 2015-12-21 12:00:42 +01:00
Smaine Kahlouch
bba3525cd8 use loadbalancer when that's possible 2015-12-21 09:13:48 +01:00
Smaine Kahlouch
2c816f66a3 Check calico network pool 2015-12-20 16:51:14 +01:00
Smaine Kahlouch
d585ceaf3b set permissions on network-environment file 2015-12-19 12:32:06 +01:00
Smaine Kahlouch
fec1dc9041 A single file for tokens tasks 2015-12-19 11:00:22 +01:00
Smaine Kahlouch
e7e03bae9f calico talks to apiserver with https 2015-12-18 22:22:52 +01:00
Smaine Kahlouch
b81a064242 README, update inventory 2015-12-18 16:40:58 +01:00
Smaine Kahlouch
03d402e226 using hostnames in the inventory is more readable 2015-12-18 16:13:42 +01:00
Smaine Kahlouch
0a238d9853 Specify etcd servers for the etcd cluster 2015-12-18 14:31:51 +01:00
Smaine Kahlouch
4fe0ced5db README, Fix http links 2015-12-18 13:32:03 +01:00
Smaine Kahlouch
c6d65cb535 remove temporary workaround due to node reboot issue with calico 2 2015-12-18 13:25:46 +01:00
Smaine Kahlouch
a0746a3efd remove temporary workaround due to node reboot issue with calico 2015-12-18 13:22:32 +01:00
Smaine Kahlouch
46807c655d Update README 2015-12-18 13:21:22 +01:00
Smaine Kahlouch
970aab70e1 Upgrade calico version to v0.13.0, fixes the node reboot issue 2015-12-18 13:10:26 +01:00
Smaine Kahlouch
4561dd327b remove deprecated var CALICOCTL_PATH 2015-12-18 13:09:42 +01:00
Smaine Kahlouch
94c0c32752 The etcd role is run on all the servers 2015-12-18 11:29:06 +01:00
Smaine Kahlouch
b155e8cc7b Fix error in ETCD_INITIAL_CLUSTER loop 2015-12-18 11:22:56 +01:00
Smaine Kahlouch
9046b7b1bf Configure calico pool on an etcd server 2015-12-18 10:16:03 +01:00
Antoine Legrand
3c450191ea Use etcd node ip in initial cluster 2015-12-17 22:47:19 +01:00
Antoine Legrand
184bb8c94d Use 0755 mode for binaries 2015-12-17 22:46:50 +01:00
Antoine Legrand
a003d91576 simplify inventory path 2015-12-17 21:32:06 +01:00
Smaine Kahlouch
9914229484 using ip address instead of inventory_hostname for kube-proxy 2015-12-17 10:43:06 +01:00
Smaine Kahlouch
b3841659d7 Review role order, use master ip even when fqdn are used in the inventory 2015-12-16 23:49:01 +01:00
Smaine Kahlouch
3a349b8519 Using var file for etcd service 2015-12-16 21:43:29 +01:00
Antoine Legrand
6e91b6f47c Merge pull request #22 from ansibl8s/ha_master
- HA (kubernetes and etcd)
- Dockerize kubernetes components (api-server/scheduler/replica-manager/proxy)
2015-12-16 18:11:50 +01:00
ant31
bf5c531037 Merge branch 'master' into ha 2015-12-16 18:09:50 +01:00
ant31
44ac355aa7 Update dependencies 2015-12-16 18:01:52 +01:00
ant31
958c770bef Update ports 2015-12-16 17:43:26 +01:00
ant31
6012230110 Merge branch 'ha_master' of https://github.com/ansibl8s/setup-kubernetes into ha 2015-12-15 17:42:01 +01:00
Smaine Kahlouch
61bb6468ef Update README, cluster.yml 2015-12-15 17:24:37 +01:00
Smaine Kahlouch
f2069b296c BGP peering and loadbalancing vars are managed in a group_vars file 2015-12-15 17:16:19 +01:00
Smaine Kahlouch
9649f2779d Commenting out loadbalancing vars 2015-12-15 17:01:29 +01:00
Smaine Kahlouch
c91a3183d3 manage undefined vars for loadbalancing 2015-12-15 16:51:55 +01:00
ant31
693230ace9 Merge branch 'ha_master' of https://github.com/ansibl8s/setup-kubernetes into ha 2015-12-15 16:28:49 +01:00
ant31
f21f660cc5 Use kube_apiserver_port 2015-12-15 16:27:12 +01:00
Smaine Kahlouch
43afd42f59 use 3 members for etcd clustering 2015-12-15 15:27:12 +01:00
Smaine Kahlouch
4d1828c724 group vars per location 2015-12-15 15:25:24 +01:00
Smaine Kahlouch
953f482585 kube-proxy loadbalancing, need an external loadbalancer 2015-12-15 15:20:08 +01:00
Smaine Kahlouch
4055980ce6 ha apiservers for kubelet 2015-12-15 13:14:27 +01:00
Smaine Kahlouch
e2984b4fdb ha etcd with calico 2015-12-15 11:49:11 +01:00
ant31
394a64f904 Add etcd in apps.yml 2015-12-14 22:42:00 +01:00
Smaine Kahlouch
2fc8b46996 etcd can run on a distinct cluster 2015-12-14 10:39:13 +01:00
Smaine Kahlouch
5efc09710b Renaming hyperkube image vars 2015-12-14 09:54:58 +01:00
Smaine Kahlouch
f908309739 update README with multi-master notes 2015-12-13 16:59:22 +01:00
Smaine Kahlouch
9862afb097 Upgrade kubernetes to v1.1.3 2015-12-13 16:41:18 +01:00
Smaine Kahlouch
59994a6df1 Quickstart documentation 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
0a1b92f348 cluster log level variable 'kube_log_level' 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
af9b945874 add the loadbalancer address to ssl certs 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3cbcd6f189 Calico uses the loadbalancer to reach etcd if 'loadbalancer_address' is defined. The loadbalancer has to be configured first 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
1568cbe8e9 optional api runtime extensions 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
eb4dd5f19d update kubectl bash completion 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
fd0e5e756e Update README, new versions 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
f49620517e running kubernetes master processes as pods 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
ef8a46b8c5 Doesn't manage firewall, note: has to be disabled before running the playbook 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
47c211f9c1 upgrading docker version 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
b23b8aa3de dnsmasq with multi master arch 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3981b73924 download only required kubernetes binaries 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
e0ec3e7241 Using one var file per environment is simpler 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
b66cc67b6f Configure network-environment with a single template 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
83c1105192 Configuring calico pool once, before starting calico-node 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
d9a8de487f review roles order 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
d1e19563b0 Master and nodes will run the 'node' role, kube-proxy is run under a container, new script for ssl certs 2015-12-12 19:37:08 +01:00
Smaine Kahlouch
3014dfef24 Clustering etcd for ha masters 2015-12-12 19:37:08 +01:00
ant31
b92fa01e05 Remove etcd dir 2015-12-10 23:17:12 +01:00
ant31
e3ebc8e009 Add Rabbitmq 2015-12-10 20:47:59 +01:00
ant31
625efc85af Merge branch 'master' of https://github.com/ansibl8s/setup-kubernetes 2015-12-10 20:47:15 +01:00
ant31
d30474d305 Add k8s-etcd 2015-12-10 20:46:33 +01:00
Smaine Kahlouch
9cecc30b6d changing proxy mode to default 'userspace', issues with 'iptables' 2015-12-09 15:03:57 +01:00
147 changed files with 21594 additions and 1564 deletions

.gitmodules (10 lines changed)

@@ -41,3 +41,13 @@
[submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git
[submodule "roles/apps/k8s-etcd"]
path = roles/apps/k8s-etcd
url = https://github.com/ansibl8s/k8s-etcd.git
[submodule "roles/apps/k8s-rabbitmq"]
path = roles/apps/k8s-rabbitmq
url = https://github.com/ansibl8s/k8s-rabbitmq.git
[submodule "roles/apps/k8s-pgbouncer"]
path = roles/apps/k8s-pgbouncer
url = https://github.com/ansibl8s/k8s-pgbouncer.git
branch = v1.0

.travis.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
sudo: required
dist: trusty

language: python
python: "2.7"

addons:
  hosts:
    - node1

env:
  - SITE=cluster.yml ANSIBLE_VERSION=2.0.0

install:
  # Install Ansible.
  - sudo -H pip install ansible==${ANSIBLE_VERSION}
  - sudo -H pip install netaddr

cache:
  directories:
    - $HOME/releases
    - $HOME/.cache/pip

before_script:
  - export PATH=$PATH:/usr/local/bin

script:
  # Check the role/playbook's syntax.
  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --syntax-check"

  # Run the role/playbook with ansible-playbook.
  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local"

  # Run the role/playbook again, checking to make sure it's idempotent.
  - >
    sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local
    | tee /dev/stderr | grep -q 'changed=0.*failed=0'
    && (echo 'Idempotence test: pass' && exit 0)
    || (echo 'Idempotence test: fail' && exit 1)

README.md (233 lines changed)

@@ -1,36 +1,70 @@
[![Build Status](https://travis-ci.org/ansibl8s/setup-kubernetes.svg)](https://travis-ci.org/ansibl8s/setup-kubernetes)
kubernetes-ansible
========
Install and configure a kubernetes cluster including network plugin and optional addons.
Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.
This project allows you to:
- Install and configure a **Multi-Master/HA kubernetes** cluster.
- Choose the **network plugin** to be used within the cluster
- Use a **set of roles** to install applications over the k8s cluster
- Rely on a **flexible method** which helps to create new roles for apps.
Linux distributions tested:
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10
* **Fedora** 23
* **CentOS** 7 (Currently with flannel only)
### Requirements
Tested on **Debian Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
The target servers must have access to the Internet in order to pull docker images.
The firewalls are not managed, you'll need to implement your own rules the way you are used to.
Ansible v1.9.x
* The target servers must have **access to the Internet** in order to pull docker images.
* The firewalls are not managed, you'll need to implement your own rules the way you are used to.
In order to avoid any issue during deployment you should **disable your firewall**.
* **Copy your ssh keys** to all the servers part of your inventory.
* **Ansible v2.x and python-netaddr**
* Basic knowledge of Ansible. Please refer to the [Ansible documentation](http://www.ansible.com/how-ansible-works)
### Components
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.11.0
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.4
* [etcd](https://github.com/coreos/etcd/releases) v2.2.4
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.14.0
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
* [docker](https://www.docker.com/) v1.8.3
* [docker](https://www.docker.com/) v1.9.1
Quickstart
-------------------------
The following steps will quickly set up a kubernetes cluster with the default configuration.
These defaults are good for testing purposes.
Edit the inventory according to the number of servers
```
[kube-master]
10.115.99.31
[etcd]
10.115.99.31
10.115.99.32
10.115.99.33
[kube-node]
10.115.99.32
10.115.99.33
[k8s-cluster:children]
kube-node
kube-master
```
Run the playbook
```
ansible-playbook -i inventory/inventory.cfg cluster.yml -u root
```
You can jump directly to "*Available apps, installation procedure*"
Ansible
-------------------------
### Download binaries
A role downloads the required binaries. They will be stored in a directory defined by the variable
**'local_release_dir'** (by default /tmp).
Please ensure that you have enough disk space there (about **1G**).
**Note**: Whenever you need to change the version of a software component, you'll have to erase the contents of this directory.
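For instance, a minimal sketch of overriding that location in your vars (the path mirrors the default shipped later in the group vars; any filesystem with about 1G free will do):
```yaml
# Where the binaries will be downloaded (needs about 1G of free space)
local_release_dir: "/tmp/releases"
```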
### Variables
The main variables to change are located in the directory ```environments/[env_name]/group_vars/k8s-cluster.yml```.
The main variables to change are located in the directory ```inventory/group_vars/all.yml```.
### Inventory
Below is an example of an inventory.
@@ -39,109 +73,111 @@ By default this variable is set to false and therefore all the nodes are configu
In node-mesh mode the nodes peer with each other in order to exchange routes.
```
[downloader]
10.99.0.26
[kube-master]
10.99.0.26
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
[etcd]
10.99.0.26
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
[kube-node]
10.99.0.4
10.99.0.5
10.99.0.36
10.99.0.37
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37
[paris]
10.99.0.26
10.99.0.4 local_as=xxxxxxxx
10.99.0.5 local_as=xxxxxxxx
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
[usa]
10.99.0.36 local_as=xxxxxxxx
10.99.0.37 local_as=xxxxxxxx
[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
[k8s-cluster:children]
kube-node
kube-master
[paris:vars]
peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}]
[usa:vars]
peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}]
```
### Playbook
```
---
- hosts: downloader
  sudo: no
  roles:
    - { role: download, tags: download }

- hosts: k8s-cluster
  roles:
    - { role: etcd, tags: etcd }
    - { role: download, tags: download }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
    - { role: kubernetes/node, tags: node }
    - { role: etcd, tags: etcd }
    - { role: dnsmasq, tags: dnsmasq }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }

- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }

- hosts: kube-node
  roles:
    - { role: kubernetes/node, tags: node }
```
### Run
It is possible to define variables for different environments.
For instance, in order to deploy the cluster on the 'dev' environment, run the following command.
```
ansible-playbook -i environments/dev/inventory cluster.yml -u root
ansible-playbook -i inventory/dev/inventory.cfg cluster.yml -u root
```
Kubernetes
-------------------------
### Multi master notes
* You can choose where to install the master components. If you want your master node to act both as master (api, scheduler, controller) and node (e.g. accept workloads, create pods...),
the server address has to be present in both groups 'kube-master' and 'kube-node'.
* Almost all kubernetes components run as pods, except *kubelet*. These pods are managed by the kubelet, which ensures they're always running.
* For safety reasons, you should have at least two master nodes and three etcd servers.
* Kube-proxy doesn't support multiple apiservers on startup ([Issue 18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
In order to do so, two variables have to be set: '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**' (see the sketch below).
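As an illustration only, a minimal sketch of those two variables in the group vars, reusing the placeholder address and port from the commented examples added in this change:
```yaml
# Hypothetical values; point them at your own loadbalancer.
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: "10.99.0.44"
  port: "8383"
```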
### Network Overlay
You can choose between two network plugins; exactly one must be selected.
* **flannel**: gre/vxlan (layer 2) networking. ([official docs]('https://github.com/coreos/flannel'))
* **flannel**: gre/vxlan (layer 2) networking. ([official docs](https://github.com/coreos/flannel))
* **calico**: bgp (layer 3) networking. ([official docs]('http://docs.projectcalico.org/en/0.13/'))
* **calico**: bgp (layer 3) networking. ([official docs](http://docs.projectcalico.org/en/0.13/))
The choice is defined with the variable '**kube_network_plugin**'.
### Expose a service
There are several loadbalancing solutions.
The ones I found suitable for kubernetes are [Vulcand]('http://vulcand.io/') and [Haproxy]('http://www.haproxy.org/')
The ones I found suitable for kubernetes are [Vulcand](http://vulcand.io/) and [Haproxy](http://www.haproxy.org/)
My cluster is working with haproxy and kubernetes services are configured with the loadbalancing type '**nodePort**',
e.g. each node opens the same tcp port and forwards the traffic to the target pod wherever it is located.
Then haproxy can be configured to query kubernetes's api in order to loadbalance on the proper tcp port on the nodes (see the sketch below).
Please refer to the proper kubernetes documentation on [Services]('https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md')
Please refer to the proper kubernetes documentation on [Services](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md)
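As an illustration only, a nodePort service manifest could look like the following (the service name, label selector, and port numbers are hypothetical):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp                # hypothetical service name
spec:
  type: NodePort             # every node opens the same tcp port
  selector:
    app: myapp               # hypothetical pod label
  ports:
    - port: 80               # cluster-internal service port
      nodePort: 30080        # tcp port opened on every node (30000-32767 by default); haproxy balances on it
```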
### Check cluster status
#### Kubernetes components
Master processes: kube-apiserver, kube-scheduler, kube-controller-manager, kube-proxy
Node processes: kubelet, kube-proxy, [calico-node|flanneld]
* Check the status of the processes
```
systemctl status [process_name]
systemctl status kubelet
```
* Check the logs
```
journalctl -ae -u [process_name]
journalctl -ae -u kubelet
```
* Check the NAT rules
@@ -149,6 +185,11 @@ journalctl -ae -u [process_name]
iptables -nLv -t nat
```
For the master nodes you'll have to check the docker logs of the apiserver
```
docker logs [apiserver docker id]
```
### Available apps, installation procedure
@@ -163,7 +204,7 @@ The list of available apps are available [there](https://github.com/ansibl8s)
For instance it is **strongly recommended** to install a dns server which resolves kubernetes service names.
In order to use this role you'll need the following entries in the file '*requirements.yml*'
Please refer to the [k8s-kubdns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
```
- src: https://github.com/ansibl8s/k8s-common.git
path: roles/apps
@@ -185,21 +226,6 @@ Then download the roles with ansible-galaxy
ansible-galaxy install -r requirements.yml
```
#### Git submodules
Alternatively the roles can be installed as git submodules.
That way it is easier to make changes and commit them.
You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```
In order to install the dns addon you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
...
@@ -210,11 +236,17 @@ Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
```
ansible-playbook -i environments/dev/inventory apps.yml -u root
ansible-playbook -i inventory/inventory.cfg apps.yml -u root
```
#### Git submodules
Alternatively the roles can be installed as git submodules.
That way it is easier to make changes and commit them.
#### Calico networking
### Networking
#### Calico
Check if the calico-node container is running
```
docker ps | grep calico
@@ -235,6 +267,53 @@ calicoctl pool show
```
calicoctl endpoint show --detail
```
#### Flannel networking
#### Flannel
* The flannel configuration file should have been created there:
```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```
* Check if the network interface has been created
```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
inet 10.233.16.0/18 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge ip in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its ip address
```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created
kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2
```
```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
inet 10.233.16.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
valid_lft forever preferred_lft forever
```
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html).

@@ -2,22 +2,32 @@
- hosts: kube-master
  roles:
    # System
    - { role: apps/k8s-kubedns, tags: 'kubedns' }
    - { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] }
    # Databases
    - { role: apps/k8s-postgres, tags: 'postgres' }
    - { role: apps/k8s-elasticsearch, tags: 'es' }
    - { role: apps/k8s-memcached, tags: 'es' }
    - { role: apps/k8s-redis, tags: 'es' }
    - { role: apps/k8s-elasticsearch, tags: 'elasticsearch' }
    - { role: apps/k8s-memcached, tags: 'memcached' }
    - { role: apps/k8s-redis, tags: 'redis' }
    - { role: apps/k8s-mongodb-simple, tags: 'mongodb-simple' }
    # Msg Broker
    - { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }
    # Monitoring
    - { role: apps/k8s-influxdb, tags: 'influxdb'}
    - { role: apps/k8s-heapster, tags: 'heapster'}
    - { role: apps/k8s-kubedash, tags: 'kubedash'}
    - { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']}
    - { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']}
    - { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']}
    # logging
    - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}
    # Console
    - { role: apps/k8s-fabric8, tags: 'fabric8' }
    - { role: apps/k8s-kube-ui, tags: 'kube-ui' }
    - { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']}
    # ETCD
    - { role: apps/k8s-etcd, tags: 'etcd'}
    # Chat Apps
    - { role: apps/k8s-rocketchat, tags: 'rocketchat'}

@@ -1,20 +1,15 @@
---
- hosts: downloader
  sudo: no
  roles:
    - { role: download, tags: download }

- hosts: k8s-cluster
  roles:
    - { role: etcd, tags: etcd }
    - { role: adduser, tags: adduser }
    - { role: download, tags: download }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
    - { role: kubernetes/node, tags: node }
    - { role: etcd, tags: etcd }
    - { role: dnsmasq, tags: dnsmasq }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }

- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }

- hosts: kube-node
  roles:
    - { role: kubernetes/node, tags: node }

@@ -1,6 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

@@ -1,25 +1,39 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# kube_users:
#   kube:
#     pass: changeme
#     role: admin
kube_users:
  kube:
    pass: changeme
    role: admin
#  root:
#    pass: changeme
#    role: admin
# Kubernetes cluster name, also will be used as DNS domain
# cluster_name: cluster.local
cluster_name: cluster.local
# set this variable to calico if needed. keep it empty if flannel is used
# kube_network_plugin: calico
kube_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
# kube_service_addresses: 10.233.0.0/18
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
# kube_pods_subnet: 10.233.64.0/18
kube_pods_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
@@ -28,16 +42,17 @@
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
# kube_network_node_prefix: 24
kube_network_node_prefix: 24
# With calico it is possible to distributed routes with border routers of the datacenter.
# peer_with_router: false
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router
# The port the API Server will be listening on.
# kube_master_port: 443 # (https)
# kube_master_insecure_port: 8080 # (http)
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
@@ -48,13 +63,28 @@
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq
# upstream_dns_servers:
# - 8.8.8.8
# - 4.4.8.8
upstream_dns_servers:
- 8.8.8.8
- 4.4.8.8
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
# dns_setup: true
# dns_domain: "{{ cluster_name }}"
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes dns service
# dns_server: 10.233.0.10
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
# For multi-master architectures:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy:
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
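For reference, a sketch of what the ipaddr() expressions above evaluate to with the default service range (computed here, not stated in the file):
```yaml
# kube_service_addresses: 10.233.0.0/18
# kube_apiserver_ip -> 10.233.0.1   # first address of the service range
# dns_server        -> 10.233.0.2   # second address of the service range
```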

@@ -0,0 +1,10 @@
#---
#peers:
#  - router_id: "10.99.0.34"
#    as: "65xxx"
#  - router_id: "10.99.0.35"
#    as: "65xxx"
#
#loadbalancer_apiserver:
#  address: "10.99.0.44"
#  port: "8383"

@@ -0,0 +1,10 @@
#---
#peers:
#  - router_id: "10.99.0.2"
#    as: "65xxx"
#  - router_id: "10.99.0.3"
#    as: "65xxx"
#
#loadbalancer_apiserver:
#  address: "10.99.0.21"
#  port: "8383"

@@ -0,0 +1,29 @@
[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
[etcd]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
[kube-node]
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37
[paris]
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
[k8s-cluster:children]
kube-node
kube-master

inventory/local-tests.cfg (new file, 14 lines)

@@ -0,0 +1,14 @@
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
[kube-master]
node1
[etcd]
node1
[kube-node]
node1
[k8s-cluster:children]
kube-node
kube-master

@@ -31,6 +31,10 @@
# path: roles/apps
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-pgbouncer.git
# path: roles/apps
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-heapster.git
# path: roles/apps
#

@@ -0,0 +1,15 @@
---
addusers:
  - name: etcd
    comment: "Etcd user"
    createhome: yes
    home: "/var/lib/etcd"
    system: yes
    shell: /bin/nologin
  - name: kube
    comment: "Kubernetes user"
    shell: /sbin/nologin
    system: yes
    group: "{{ kube_cert_group }}"
    createhome: no

@@ -0,0 +1,13 @@
- name: User | Create User Group
  group: name={{item.group|default(item.name)}} system={{item.system|default(omit)}}
  with_items: addusers

- name: User | Create User
  user:
    comment: "{{item.comment|default(omit)}}"
    createhome: "{{item.create_home|default(omit)}}"
    group: "{{item.group|default(item.name)}}"
    home: "{{item.home|default(omit)}}"
    name: "{{item.name}}"
    system: "{{item.system|default(omit)}}"
  with_items: addusers

Submodule roles/apps/k8s-etcd added at abd61ee91a

@@ -1,3 +0,0 @@
---
- name: restart dnsmasq
  command: systemctl restart dnsmasq

@@ -1,58 +1,95 @@
---
- name: populate inventory into hosts file
  lineinfile:
    dest: /etc/hosts
    regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
    line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
    state: present
  when: hostvars[item].ansible_default_ipv4.address is defined
  with_items: groups['all']

- name: clean hosts file
  lineinfile:
    dest: /etc/hosts
    regexp: "{{ item }}"
    state: absent
  with_items:
    - '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
    - '^::1(\s+){{ inventory_hostname }}.*'

- name: install dnsmasq and bind9utils
  apt:
    name: "{{ item }}"
    state: present
  with_items:
    - dnsmasq
    - bind9utils
  when: inventory_hostname in groups['kube-master'][0]

- name: ensure dnsmasq.d directory exists
  file:
    path: /etc/dnsmasq.d
    state: directory
  when: inventory_hostname in groups['kube-master'][0]

- name: configure dnsmasq
- name: ensure dnsmasq.d-available directory exists
  file:
    path: /etc/dnsmasq.d-available
    state: directory

- name: Write dnsmasq configuration
  template:
    src: 01-kube-dns.conf.j2
    dest: /etc/dnsmasq.d-available/01-kube-dns.conf
    mode: 0755
    backup: yes

- name: Stat dnsmasq configuration
  stat: path=/etc/dnsmasq.d/01-kube-dns.conf
  register: sym

- name: Move previous configuration
  command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
  changed_when: False
  when: sym.stat.islnk is defined and sym.stat.islnk == False

- name: Enable dnsmasq configuration
  file:
    src: /etc/dnsmasq.d-available/01-kube-dns.conf
    dest: /etc/dnsmasq.d/01-kube-dns.conf
    mode: 755
  notify:
    - restart dnsmasq
  when: inventory_hostname in groups['kube-master'][0]
    state: link

- name: enable dnsmasq
  service:
    name: dnsmasq
    state: started
    enabled: yes
  when: inventory_hostname in groups['kube-master'][0]

- name: Create dnsmasq pod manifest
  template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest

- name: update resolv.conf with new DNS setup
  template:
    src: resolv.conf.j2
    dest: /etc/resolv.conf
    mode: 644

- name: Check for dnsmasq port (pulling image and running container)
  wait_for:
    port: 53
    delay: 5

- name: check resolvconf
  stat: path=/etc/resolvconf/resolv.conf.d/head
  register: resolvconf

- name: target resolv.conf file
  set_fact:
    resolvconffile: >-
      {%- if resolvconf.stat.exists == True -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}

- name: Add search resolv.conf
  lineinfile:
    line: "search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}"
    dest: "{{resolvconffile}}"
    state: present
    insertbefore: BOF
    backup: yes
    follow: yes

- name: Add local dnsmasq to resolv.conf
  lineinfile:
    line: "nameserver 127.0.0.1"
    dest: "{{resolvconffile}}"
    state: present
    insertafter: "^search.*$"
    backup: yes
    follow: yes

- name: Add options to resolv.conf
  lineinfile:
    line: options {{ item }}
    dest: "{{resolvconffile}}"
    state: present
    regexp: "^options.*{{ item }}$"
    insertafter: EOF
    backup: yes
    follow: yes
  with_items:
    - timeout:2
    - attempts:2

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=0755 backup=yes
  when: ansible_os_family == "Debian"

- name: disable resolv.conf modification by dhclient
  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
  when: ansible_os_family == "RedHat"

- name: update resolvconf
  command: resolvconf -u
  changed_when: False
  when: resolvconf.stat.exists == True

- meta: flush_handlers

@@ -1,5 +1,6 @@
#Listen on all interfaces
interface=*
#Listen on localhost
bind-interfaces
listen-address=127.0.0.1
addn-hosts=/etc/hosts

@@ -0,0 +1,49 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: dnsmasq
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
    - name: dnsmasq
      image: andyshinn/dnsmasq:2.72
      command:
        - dnsmasq
      args:
        - -k
        - "-7"
        - /etc/dnsmasq.d
        - --local-service
      securityContext:
        capabilities:
          add:
            - NET_ADMIN
      imagePullPolicy: Always
      resources:
        limits:
          cpu: 100m
          memory: 256M
      ports:
        - name: dns
          containerPort: 53
          hostPort: 53
          protocol: UDP
        - name: dns-tcp
          containerPort: 53
          hostPort: 53
          protocol: TCP
      volumeMounts:
        - name: etcdnsmasqd
          mountPath: /etc/dnsmasq.d
        - name: etcdnsmasqdavailable
          mountPath: /etc/dnsmasq.d-available
  volumes:
    - name: etcdnsmasqd
      hostPath:
        path: /etc/dnsmasq.d
    - name: etcdnsmasqdavailable
      hostPath:
        path: /etc/dnsmasq.d-available

@@ -1,5 +0,0 @@
; generated by ansible
search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
{% for host in groups['kube-master'] %}
nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
{% endfor %}

roles/docker/.gitignore (new file, 2 lines)

@@ -0,0 +1,2 @@
.*.swp
.vagrant

@@ -1,17 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
[Service]
EnvironmentFile=-/etc/default/docker
Type=notify
ExecStart=/usr/bin/docker daemon -H fd:// $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target

@@ -1,12 +0,0 @@
---
- name: restart docker
  command: /bin/true
  notify:
    - reload systemd
    - restart docker service

- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker service
  service: name=docker state=restarted

@@ -1,16 +0,0 @@
---
- name: enable docker
  service:
    name: docker
    enabled: yes
    state: started
  tags:
    - docker

#- name: login to arkena's docker registry
#  shell: >
#    docker login --username={{ dockerhub_user }}
#    --password={{ dockerhub_pass }}
#    --email={{ dockerhub_email }}

- meta: flush_handlers

@@ -1,24 +0,0 @@
---
- name: Install prerequisites for https transport
  apt: pkg={{ item }} state=present update_cache=yes
  with_items:
    - apt-transport-https
    - ca-certificates

- name: Configure docker apt repository
  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list backup=yes

- name: Install docker-engine
  apt: pkg={{ item }} state=present force=yes update_cache=yes
  with_items:
    - aufs-tools
    - cgroupfs-mount
    - docker-engine=1.8.3-0~{{ ansible_distribution_release }}

- name: Copy default docker configuration
  template: src=default-docker.j2 dest=/etc/default/docker backup=yes
  notify: restart docker

- name: Copy Docker systemd unit file
  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service backup=yes
  notify: restart docker

@@ -1,3 +1,58 @@
---
- include: install.yml
- include: configure.yml

- name: gather os specific variables
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars

- name: check for minimum kernel version
  fail:
    msg: >
      docker requires a minimum kernel version of
      {{ docker_kernel_min_version }} on
      {{ ansible_distribution }}-{{ ansible_distribution_version }}
  when: ansible_kernel|version_compare(docker_kernel_min_version, "<")

- name: ensure docker repository public key is installed
  action: "{{ docker_repo_key_info.pkg_key }}"
  args:
    id: "{{item}}"
    keyserver: "{{docker_repo_key_info.keyserver}}"
    state: present
  with_items: docker_repo_key_info.repo_keys

- name: ensure docker repository is enabled
  action: "{{ docker_repo_info.pkg_repo }}"
  args:
    repo: "{{item}}"
    update_cache: yes
    state: present
  with_items: docker_repo_info.repos
  when: docker_repo_info.repos|length > 0

- name: ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"
  args:
    pkg: "{{item}}"
    update_cache: yes
    state: latest
  with_items: docker_package_info.pkgs
  when: docker_package_info.pkgs|length > 0

- meta: flush_handlers

- name: ensure docker service is started and enabled
  service:
    name: "{{ item }}"
    enabled: yes
    state: started
  with_items:
    - docker
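For instance, on a hypothetical Ubuntu 14.04 host the with_first_found lookup above would resolve in this order, loading the first file that exists under ../vars (the apt-based vars shown below would be picked up via the os-family fallback if no distribution-specific file is present):
```yaml
# Search order on Ubuntu 14.04 (illustrative):
#   ubuntu-14.04.yml
#   ubuntu-trusty.yml
#   ubuntu-14.yml
#   ubuntu.yml
#   debian.yml     # os-family fallback
#   defaults.yml
```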

@@ -1,13 +0,0 @@
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS=""
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"

@@ -1 +0,0 @@
deb https://apt.dockerproject.org/repo {{ansible_distribution|lower}}-{{ ansible_distribution_release}} main

@@ -0,0 +1,14 @@
docker_kernel_min_version: '2.6.32-431'

docker_package_info:
  pkg_mgr: yum
  pkgs:
    - docker-io

docker_repo_key_info:
  pkg_key: ''
  repo_keys: []

docker_repo_info:
  pkg_repo: ''
  repos: []

@@ -0,0 +1,20 @@
docker_kernel_min_version: '3.2'

docker_package_info:
  pkg_mgr: apt
  pkgs:
    - docker-engine

docker_repo_key_info:
  pkg_key: apt_key
  keyserver: hkp://p80.pool.sks-keyservers.net:80
  repo_keys:
    - 58118E89F3A912897C070ADBF76221572C52609D

docker_repo_info:
  pkg_repo: apt_repository
  repos:
    - >
      deb https://apt.dockerproject.org/repo
      {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
      main

@@ -0,0 +1,14 @@
docker_kernel_min_version: '0'

docker_package_info:
  pkg_mgr: yum
  pkgs:
    - docker-io

docker_repo_key_info:
  pkg_key: ''
  repo_keys: []

docker_repo_info:
  pkg_repo: ''
  repos: []

@@ -0,0 +1,14 @@
docker_kernel_min_version: '0'

docker_package_info:
  pkg_mgr: dnf
  pkgs:
    - docker-io

docker_repo_key_info:
  pkg_key: ''
  repo_keys: []

docker_repo_info:
  pkg_repo: ''
  repos: []

@@ -1,4 +0,0 @@
---
#dockerhub_user:
#dockerhub_pass:
#dockerhub_email:

@@ -0,0 +1,14 @@
docker_kernel_min_version: '0'

docker_package_info:
  pkg_mgr: yum
  pkgs:
    - docker

docker_repo_key_info:
  pkg_key: ''
  repo_keys: []

docker_repo_info:
  pkg_repo: ''
  repos: []

@@ -1,13 +1,66 @@
---
etcd_download_url: https://github.com/coreos/etcd/releases/download
flannel_download_url: https://github.com/coreos/flannel/releases/download
kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
local_release_dir: /tmp
etcd_version: v2.2.2
flannel_version: 0.5.5
# Versions
kube_version: v1.1.4
etcd_version: v2.2.4
calico_version: v0.14.0
calico_plugin_version: v0.7.0
kube_version: v1.1.2
kube_sha1: 69d110d371752c6492d2f8695aa7a47be5b6ed4e
# Download URL's
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
calico_version: v0.11.0
# Checksums
calico_checksum: "f251d7a8583233906aa6d059447c1e4fb32bf1369a51fdf96a68d50466d6a69c"
calico_plugin_checksum: "032f582f5eeec6fb26191d2fbcbf8bca4da3b14abb579db7baa7b3504d4dffec"
etcd_checksum: "6c4e5cdeaaac1a70b8f06b5dd6b82c37ff19993c9bca81248975610e555c4b9b"
kubectl_checksum: "873ba19926d17a3287dc8639ea1434fe3cd0cb4e61d82101ba754922cfc7a633"
kubelet_checksum: "f2d1eae3fa6e304f6cbc9b2621e4b86fc3bcb4e74a15d35f58bf00e45c706e0a"
kube_apiserver_checksum: "bb3814c4df65f1587a3650140437392ce3fb4b64f51d459457456691c99f1202"
downloads:
  - name: calico
    dest: calico/bin/calicoctl
    sha256: "{{ calico_checksum }}"
    url: "{{ calico_download_url }}"
    owner: "root"
    mode: "0755"

  - name: calico-plugin
    dest: calico/bin/calico
    sha256: "{{ calico_plugin_checksum }}"
    url: "{{ calico_plugin_download_url }}"
    owner: "root"
    mode: "0755"

  - name: etcd
    dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
    sha256: "{{ etcd_checksum }}"
    url: "{{ etcd_download_url }}"
    unarchive: true
    owner: "etcd"
    mode: "0755"

  - name: kubernetes-kubelet
    dest: kubernetes/bin/kubelet
    sha256: "{{kubelet_checksum}}"
    url: "{{ kube_download_url }}/kubelet"
    owner: "kube"
    mode: "0755"

  - name: kubernetes-kubectl
    dest: kubernetes/bin/kubectl
    sha256: "{{kubectl_checksum}}"
    url: "{{ kube_download_url }}/kubectl"
    owner: "kube"
    mode: "0755"

  - name: kubernetes-apiserver
    dest: kubernetes/bin/kube-apiserver
    sha256: "{{kube_apiserver_checksum}}"
    url: "{{ kube_download_url }}/kube-apiserver"
    owner: "kube"
    mode: "0755"

@@ -1,21 +0,0 @@
---
- name: Create calico release directory
  local_action: file
    path={{ local_release_dir }}/calico/bin
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if calicoctl has been downloaded
  local_action: stat
    path={{ local_release_dir }}/calico/bin/calicoctl
  register: c_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download calico
  local_action: shell
    curl -o {{ local_release_dir }}/calico/bin/calicoctl -Ls {{ calico_download_url }}/{{ calico_version }}/calicoctl
  when: not c_tar.stat.exists
  register: dl_calico
  delegate_to: "{{ groups['kube-master'][0] }}"

@@ -1,42 +0,0 @@
---
- name: Create etcd release directory
  local_action: file
    path={{ local_release_dir }}/etcd/bin
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if etcd release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
  register: e_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download etcd
  local_action: shell
    curl -o {{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz -Ls {{ etcd_download_url }}/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
  when: not e_tar.stat.exists
  register: dl_etcd
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Extract etcd archive
  local_action: unarchive
    src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
    dest={{ local_release_dir }}/etcd copy=no
  when: dl_etcd|changed
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Pick up only etcd binaries
  local_action: copy
    src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/{{ item }}
    dest={{ local_release_dir }}/etcd/bin
  with_items:
    - etcdctl
    - etcd
  when: dl_etcd|changed

- name: Delete unused etcd files
  local_action: file
    path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64 state=absent
  when: dl_etcd|changed

View File

@@ -1,39 +0,0 @@
---
- name: Create flannel release directory
  local_action: file
    path={{ local_release_dir }}/flannel
    recurse=yes
    state=directory
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Check if flannel release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
  register: f_tar
  delegate_to: "{{ groups['kube-master'][0] }}"

# issues with get_url module and redirects, to be tested again in the near future
- name: Download flannel
  local_action: shell
    curl -o {{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz -Ls {{ flannel_download_url }}/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz
  when: not f_tar.stat.exists
  register: dl_flannel
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Extract flannel archive
  local_action: unarchive
    src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
    dest={{ local_release_dir }}/flannel copy=no
  when: dl_flannel|changed
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Pick up only flannel binaries
  local_action: copy
    src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}/flanneld
    dest={{ local_release_dir }}/flannel/bin
  when: dl_flannel|changed

- name: Delete unused flannel files
  local_action: file
    path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }} state=absent
  when: dl_flannel|changed

View File

@@ -1,47 +0,0 @@
---
- name: Create kubernetes release directory
  local_action: file
    path={{ local_release_dir }}/kubernetes
    state=directory

- name: Check if kubernetes release archive has been downloaded
  local_action: stat
    path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
  register: k_tar

# issues with get_url module and redirects, to be tested again in the near future
- name: Download kubernetes
  local_action: shell
    curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
  when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
  register: dl_kube

- name: Compare kubernetes archive checksum
  local_action: stat
    path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
  register: k_tar
  failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
  when: dl_kube|changed

- name: Extract kubernetes archive
  local_action: unarchive
    src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
    dest={{ local_release_dir }}/kubernetes copy=no
  when: dl_kube|changed

- name: Extract kubernetes binaries archive
  local_action: unarchive
    src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
    dest={{ local_release_dir }}/kubernetes copy=no
  when: dl_kube|changed

- name: Pick up only kubernetes binaries
  local_action: synchronize
    src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
    dest={{ local_release_dir }}/kubernetes
  when: dl_kube|changed

- name: Delete unused kubernetes files
  local_action: file
    path={{ local_release_dir }}/kubernetes/kubernetes state=absent
  when: dl_kube|changed

View File

@@ -1,5 +1,32 @@
---
- include: kubernetes.yml
- include: etcd.yml
- include: calico.yml
- include: flannel.yml
- name: Create dest directories
  file: path={{ local_release_dir }}/{{ item.dest|dirname }} state=directory recurse=yes
  with_items: downloads

- name: Download items
  get_url:
    url: "{{ item.url }}"
    dest: "{{ local_release_dir }}/{{ item.dest }}"
    sha256sum: "{{ item.sha256 | default(omit) }}"
    owner: "{{ item.owner|default(omit) }}"
    mode: "{{ item.mode|default(omit) }}"
  with_items: downloads

- name: Extract archives
  unarchive:
    src: "{{ local_release_dir }}/{{ item.dest }}"
    dest: "{{ local_release_dir }}/{{ item.dest|dirname }}"
    owner: "{{ item.owner|default(omit) }}"
    mode: "{{ item.mode|default(omit) }}"
    copy: no
  when: "{{ item.unarchive is defined and item.unarchive == True }}"
  with_items: downloads

- name: Fix permissions
  file:
    state: file
    path: "{{ local_release_dir }}/{{ item.dest }}"
    owner: "{{ item.owner|default(omit) }}"
    mode: "{{ item.mode|default(omit) }}"
  when: "{{ item.unarchive is not defined or item.unarchive == False }}"
  with_items: downloads

View File

@@ -0,0 +1,3 @@
---
etcd_version: v2.2.4
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"

View File

@@ -1,15 +1,15 @@
---
- name: restart daemons
- name: restart etcd
  command: /bin/true
  notify:
    - reload systemd
    - restart etcd2
    - reload etcd

- name: reload systemd
  command: systemctl daemon-reload
  when: init_system == "systemd"

- name: restart etcd2
  service: name=etcd2 state=restarted

- name: Save iptables rules
  command: service iptables save

- name: reload etcd
  service:
    name: etcd
    state: restarted
View File

@@ -1,15 +1,23 @@
---
- name: Disable ferm
  service: name=ferm state=stopped enabled=no
- name: Configure | Copy etcd.service systemd file
  template:
    src: etcd.service.j2
    dest: /lib/systemd/system/etcd.service
    backup: yes
  when: init_system == "systemd"
  notify: restart etcd

- name: Create etcd2 environment vars dir
  file: path=/etc/systemd/system/etcd2.service.d state=directory
- name: Configure | Write etcd initd script
  template:
    src: deb-etcd.initd.j2
    dest: /etc/init.d/etcd
    owner: root
    mode: 0755
  when: init_system == "sysvinit" and ansible_os_family == "Debian"
  notify: restart etcd

- name: Write etcd2 config file
  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf backup=yes
  notify:
    - reload systemd
    - restart etcd2
- name: Ensure etcd2 is running
  service: name=etcd2 state=started enabled=yes
- name: Configure | Create etcd config file
  template:
    src: etcd.j2
    dest: /etc/etcd.env
  notify: restart etcd

View File

@@ -1,25 +1,9 @@
---
- name: Create etcd user
  user: name=etcd shell=/bin/nologin home=/var/lib/etcd2
- name: Install | Copy etcd binary
  command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
  register: etcd_copy
  changed_when: false

- name: Install etcd binaries
  copy:
    src={{ local_release_dir }}/etcd/bin/{{ item }}
    dest={{ bin_dir }}
    owner=etcd
    mode=u+x
  with_items:
    - etcdctl
    - etcd
  notify:
    - restart daemons
- name: Create etcd2 binary symlink
  file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link
- name: Copy etcd2.service systemd file
  template:
    src: systemd-etcd2.service.j2
    dest: /lib/systemd/system/etcd2.service
    backup: yes
  notify: restart daemons

- name: Install | Copy etcdctl binary
  command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
  changed_when: false

View File

@@ -1,3 +1,18 @@
---
- include: install.yml
- include: configure.yml

- name: Restart etcd if binary changed
  command: /bin/true
  notify: restart etcd
  when: etcd_copy.stdout_lines

# reload systemd before starting service
- meta: flush_handlers

- name: Ensure etcd is running
  service:
    name: etcd
    state: started
    enabled: yes

View File

@@ -0,0 +1,113 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd distributed k/v store
# Description:
# etcd is a distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ bin_dir }}/etcd
{% if inventory_hostname in groups['etcd'] %}
DAEMON_ARGS=""
{% else %}
DAEMON_ARGS="-proxy on"
{% endif %}
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -f /etc/etcd.env ] && . /etc/etcd.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
    status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}

# Function that starts the daemon/service
#
do_start()
{
    start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON \
        $DAEMON_ARGS \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
    RETVAL="$?"
    sleep 1
    return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View File

@@ -0,0 +1,17 @@
ETCD_DATA_DIR="/var/lib/etcd"
{% if inventory_hostname in groups['etcd'] %}
{% set etcd = {} %}
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}
{% set _dummy = etcd.update({'name':"etcd"+loop.index|string}) %}
{% endif %}
{% endfor %}
ETCD_ADVERTISE_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s_etcd"
ETCD_LISTEN_PEER_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380"
ETCD_NAME="{{ etcd.name }}"
{% endif %}
ETCD_INITIAL_CLUSTER="{% for host in groups['etcd'] %}etcd{{ loop.index|string }}=http://{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
ETCD_LISTEN_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"

View File

@@ -0,0 +1,18 @@
[Unit]
Description=etcd
[Service]
User=etcd
EnvironmentFile=/etc/etcd.env
{% if inventory_hostname in groups['etcd'] %}
ExecStart={{ bin_dir }}/etcd
{% else %}
ExecStart={{ bin_dir }}/etcd -proxy on
{% endif %}
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target

View File

@@ -1,17 +0,0 @@
# etcd2.0
[Service]
{% if inventory_hostname in groups['kube-master'] %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER_STATE=new"
Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
Environment="ETCD_NAME=master"
{% else %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_PROXY=on"
{% endif %}

View File

@@ -1,15 +0,0 @@
[Unit]
Description=etcd2
Conflicts=etcd.service
[Service]
User=etcd
Environment=ETCD_DATA_DIR=/var/lib/etcd2
Environment=ETCD_NAME=%m
ExecStart={{ bin_dir }}/etcd2
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target

View File

@@ -1,115 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Caller should set in the env:
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
# MASTER_NAME - I'm not sure what it is...
# Also the following will be respected
# CERT_DIR - where to place the finished certs
# CERT_GROUP - who the group owner of the cert files should be
cert_ip="${MASTER_IP:="${1}"}"
master_name="${MASTER_NAME:="kubernetes"}"
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
dns_domain="${DNS_DOMAIN:="cluster.local"}"
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
cert_group="${CERT_GROUP:="kube-cert"}"
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
# Calculate the first ip address in the service range
octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octects[3]+=1))
service_ip=$(echo "${octects[*]}" | sed 's/ /./g')
# Determine appropriate subject alt names
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null
cd easy-rsa-master/easyrsa3
(./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
# If there was an error in the subshell, just die.
# TODO(roberthbailey): add better error handling here
echo "=== Failed to generate certificates: Aborting ==="
exit 2
}
mkdir -p "$cert_dir"
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
for cert in "${CERTS[@]}"; do
chgrp "${cert_group}" "${cert_dir}/${cert}"
chmod 660 "${cert_dir}/${cert}"
done

View File

@@ -1,3 +0,0 @@
---
dependencies:
  - { role: etcd }

View File

@@ -1,42 +0,0 @@
---
#- name: Get create ca cert script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
#    dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
#    force=yes

- name: certs | install cert generation script
  copy:
    src=make-ca-cert.sh
    dest={{ kube_script_dir }}
    mode=0500
  changed_when: false

# FIXME This only generates a cert for one master...
- name: certs | run cert generation script
  command:
    "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
  args:
    creates: "{{ kube_cert_dir }}/server.crt"
  environment:
    MASTER_IP: "{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}"
    MASTER_NAME: "{{ inventory_hostname }}"
    DNS_DOMAIN: "{{ dns_domain }}"
    SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
    CERT_DIR: "{{ kube_cert_dir }}"
    CERT_GROUP: "{{ kube_cert_group }}"

- name: certs | check certificate permissions
  file:
    path={{ item }}
    group={{ kube_cert_group }}
    owner=kube
    mode=0440
  with_items:
    - "{{ kube_cert_dir }}/ca.crt"
    - "{{ kube_cert_dir }}/server.crt"
    - "{{ kube_cert_dir }}/server.key"
    - "{{ kube_cert_dir }}/kubecfg.crt"
    - "{{ kube_cert_dir }}/kubecfg.key"
    - "{{ kube_cert_dir }}/kubelet.crt"
    - "{{ kube_cert_dir }}/kubelet.key"

View File

@@ -1,30 +0,0 @@
---
- name: tokens | copy the token gen script
  copy:
    src=kube-gen-token.sh
    dest={{ kube_script_dir }}
    mode=u+x

- name: tokens | generate tokens for master components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:controller_manager", "system:scheduler", "system:kubectl", "system:proxy" ]
    - "{{ groups['kube-master'][0] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

- name: tokens | generate tokens for node components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:kubelet", "system:proxy" ]
    - "{{ groups['kube-node'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

View File

@@ -1,29 +0,0 @@
---
- name: define alias command for kubectl all
  lineinfile:
    dest=/etc/bash.bashrc
    line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
    regexp='^alias kball=.*$'
    state=present
    insertafter=EOF
    create=True

- name: create kubernetes config directory
  file: path={{ kube_config_dir }} state=directory
- name: create kubernetes script directory
  file: path={{ kube_script_dir }} state=directory
- name: Make sure manifest directory exists
  file: path={{ kube_manifest_dir }} state=directory

- name: write the global config file
  template:
    src: config.j2
    dest: "{{ kube_config_dir }}/config"
  notify:
    - restart daemons

- include: secrets.yml
  tags:
    - secrets

View File

@@ -1,54 +0,0 @@
---
- name: certs | create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- name: certs | make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: tokens | make sure the tokens directory exists
  file:
    path={{ kube_token_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- include: gen_certs.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

- name: Read back the CA certificate
  slurp:
    src: "{{ kube_cert_dir }}/ca.crt"
  register: ca_cert
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: certs | register the CA certificate as a fact for later use
  set_fact:
    kube_ca_cert: "{{ ca_cert.content|b64decode }}"

- name: certs | write CA certificate everywhere
  copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
  notify:
    - restart daemons

- debug: msg="{{ groups['kube-master'][0] }} == {{ inventory_hostname }}"
  tags:
    - debug

- include: gen_tokens.yml
  run_once: true
  when: inventory_hostname == groups['kube-master'][0]

View File

@@ -1,26 +0,0 @@
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# Comma separated list of nodes in the etcd cluster
# KUBE_ETCD_SERVERS="--etcd_servers="
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=5"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
# How the replication controller, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"

File diff suppressed because it is too large

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system

View File

@@ -1,56 +1,4 @@
---
- name: restart daemons
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-scheduler
    - restart reloaded-controller-manager
    - restart reloaded-apiserver
    - restart reloaded-proxy

- name: reload systemd
  command: systemctl daemon-reload

- name: restart apiserver
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-apiserver

- name: restart reloaded-apiserver
  service:
    name: kube-apiserver
    state: restarted

- name: restart controller-manager
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-controller-manager

- name: restart reloaded-controller-manager
  service:
    name: kube-controller-manager
    state: restarted

- name: restart scheduler
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-scheduler

- name: restart reloaded-scheduler
  service:
    name: kube-scheduler
    state: restarted

- name: restart proxy
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-proxy

- name: restart reloaded-proxy
  service:
    name: kube-proxy
    state: restarted

- name: restart kube-apiserver
  set_fact:
    restart_apimaster: True

View File

@@ -1,3 +1,4 @@
---
dependencies:
  - { role: kubernetes/common }
  - { role: etcd }
  - { role: kubernetes/node }

View File

@@ -1,94 +0,0 @@
---
- name: get the node token values from token files
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:controller_manager"
    - "system:scheduler"
    - "system:kubectl"
    - "system:proxy"
  register: tokens
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Set token facts
  set_fact:
    controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
    scheduler_token: "{{ tokens.results[1].content|b64decode }}"
    kubectl_token: "{{ tokens.results[2].content|b64decode }}"
    proxy_token: "{{ tokens.results[3].content|b64decode }}"

- name: write the config files for api server
  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes
  notify:
    - restart apiserver

- name: write config file for controller-manager
  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes
  notify:
    - restart controller-manager

- name: write the kubecfg (auth) file for controller-manager
  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig backup=yes
  notify:
    - restart controller-manager

- name: write the config file for scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler backup=yes
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for scheduler
  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig backup=yes
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for kubectl
  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig backup=yes

- name: Copy kubectl bash completion
  copy: src=kubectl_bash_completion.sh dest=/etc/bash_completion.d/kubectl.sh

- name: Create proxy environment vars dir
  file: path=/etc/systemd/system/kube-proxy.service.d state=directory

- name: Write proxy config file
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
  notify:
    - restart proxy

- name: write the kubecfg (auth) file for proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes

- name: populate users for basic auth in API
  lineinfile:
    dest: "{{ kube_users_dir }}/known_users.csv"
    create: yes
    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
    backup: yes
  with_dict: "{{ kube_users }}"
  notify:
    - restart apiserver

- name: Enable controller-manager
  service:
    name: kube-controller-manager
    enabled: yes
    state: started

- name: Enable scheduler
  service:
    name: kube-scheduler
    enabled: yes
    state: started

- name: Enable kube-proxy
  service:
    name: kube-proxy
    enabled: yes
    state: started

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started

View File

@@ -0,0 +1,24 @@
---
- name: tokens | generate tokens for master components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:kubectl" ]
    - "{{ groups['kube-master'] }}"
  register: gentoken_master
  changed_when: "'Added' in gentoken_master.stdout"
  when: inventory_hostname == groups['kube-master'][0]
  notify: restart kube-apiserver

- name: tokens | generate tokens for node components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:kubelet" ]
    - "{{ groups['kube-node'] }}"
  register: gentoken_node
  changed_when: "'Added' in gentoken_node.stdout"
  when: inventory_hostname == groups['kube-master'][0]
  notify: restart kube-apiserver

View File

@@ -1,34 +0,0 @@
---
- name: Write kube-apiserver systemd init file
  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes
  notify: restart apiserver

- name: Write kube-controller-manager systemd init file
  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes
  notify: restart controller-manager

- name: Write kube-scheduler systemd init file
  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes
  notify: restart scheduler

- name: Write kube-proxy systemd init file
  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
  notify: restart proxy

- name: Install kubernetes binaries
  copy:
    src={{ local_release_dir }}/kubernetes/bin/{{ item }}
    dest={{ bin_dir }}
    owner=kube
    mode=u+x
  with_items:
    - kube-apiserver
    - kube-controller-manager
    - kube-scheduler
    - kube-proxy
    - kubectl
  notify:
    - restart daemons

- name: Allow apiserver to bind on both secure and insecure ports
  shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver

View File

@@ -1,3 +1,126 @@
---
- include: install.yml
- include: config.yml
- include: gen_kube_tokens.yml
  tags: tokens

- name: Copy kubectl bash completion
  copy:
    src: kubectl_bash_completion.sh
    dest: /etc/bash_completion.d/kubectl.sh

- name: Copy kube-apiserver binary
  command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kube-apiserver" "{{ bin_dir }}/kube-apiserver"
  register: kube_apiserver_copy
  changed_when: false

- name: Copy kubectl binary
  command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kubectl" "{{ bin_dir }}/kubectl"
  changed_when: false

- name: populate users for basic auth in API
  lineinfile:
    dest: "{{ kube_users_dir }}/known_users.csv"
    create: yes
    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
    backup: yes
  with_dict: "{{ kube_users }}"
  notify: restart kube-apiserver

# Sync masters
- name: synchronize auth directories for masters
  synchronize:
    src: "{{ item }}"
    dest: "{{ kube_config_dir }}"
    recursive: yes
    delete: yes
    rsync_opts: [ '--one-file-system' ]
    set_remote_user: false
  with_items:
    - "{{ kube_token_dir }}"
    - "{{ kube_cert_dir }}"
    - "{{ kube_users_dir }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  when: inventory_hostname != "{{ groups['kube-master'][0] }}"

- name: install | Write kube-apiserver systemd init file
  template:
    src: "kube-apiserver.service.j2"
    dest: "/etc/systemd/system/kube-apiserver.service"
    backup: yes
  when: init_system == "systemd"
  notify: restart kube-apiserver

- name: install | Write kube-apiserver initd script
  template:
    src: "deb-kube-apiserver.initd.j2"
    dest: "/etc/init.d/kube-apiserver"
    owner: root
    mode: 0755
    backup: yes
  when: init_system == "sysvinit" and ansible_os_family == "Debian"

- name: Write kube-apiserver config file
  template:
    src: "kube-apiserver.j2"
    dest: "{{ kube_config_dir }}/kube-apiserver.env"
    backup: yes
  notify: restart kube-apiserver

- name: Allow apiserver to bind on both secure and insecure ports
  shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
  changed_when: false

- name: Restart apiserver
  command: "/bin/true"
  notify: restart kube-apiserver
  when: is_gentoken_calico|default(false)

- meta: flush_handlers

- include: start.yml
  with_items: groups['kube-master']
  when: "{{ hostvars[item].inventory_hostname == inventory_hostname }}"

# Create kube-system namespace
- name: copy 'kube-system' namespace manifest
  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
  run_once: yes
  when: inventory_hostname == groups['kube-master'][0]

- name: Check if kube-system exists
  command: kubectl get ns kube-system
  register: kubesystem
  changed_when: False
  ignore_errors: yes
  run_once: yes

- name: wait for the apiserver to be running
  wait_for:
    port: "{{ kube_apiserver_insecure_port }}"
    timeout: 60

- name: Create 'kube-system' namespace
  command: kubectl create -f /etc/kubernetes/kube-system-ns.yml
  changed_when: False
  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]

# Write manifests
- name: Write kube-controller-manager manifest
  template:
    src: manifests/kube-controller-manager.manifest.j2
    dest: "{{ kube_config_dir }}/kube-controller-manager.manifest"

- name: Write kube-scheduler manifest
  template:
    src: manifests/kube-scheduler.manifest.j2
    dest: "{{ kube_config_dir }}/kube-scheduler.manifest"

- name: Write podmaster manifest
  template:
    src: manifests/kube-podmaster.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest"

- name: restart kubelet
  service:
    name: kubelet
    state: restarted
  changed_when: false

View File

@@ -0,0 +1,21 @@
---
- name: Pause
  pause: seconds=10

- name: reload systemd
  command: systemctl daemon-reload
  when: init_system == "systemd" and restart_apimaster is defined and restart_apimaster == True

- name: reload kube-apiserver
  service:
    name: kube-apiserver
    state: restarted
    enabled: yes
  when: restart_apimaster is defined and restart_apimaster == True

- name: Enable apiserver
  service:
    name: kube-apiserver
    enabled: yes
    state: started
  when: restart_apimaster is not defined or restart_apimaster == False

View File

@@ -1,28 +0,0 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port={{kube_master_insecure_port}} --secure-port={{ kube_master_port }}"
# KUBELET_PORT="--kubelet_port=10250"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
# Add your own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"

View File

@@ -1,6 +0,0 @@
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: controller-manager-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: controller-manager
  name: controller-manager-to-{{ cluster_name }}
users:
- name: controller-manager
  user:
    token: {{ controller_manager_token }}

View File

@@ -0,0 +1,118 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: kube-apiserver
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: The Kubernetes apiserver
# Description:
# The Kubernetes apiserver.
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes apiserver"
NAME=kube-apiserver
DAEMON={{ bin_dir }}/kube-apiserver
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
    start)
        log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) log_end_msg 0 || exit 0 ;;
            2)   log_end_msg 1 || exit 1 ;;
        esac
        ;;
    stop)
        log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) log_end_msg 0 ;;
            2)   exit 1 ;;
        esac
        ;;
    status)
        status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    restart|force-reload)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

View File

@@ -0,0 +1,44 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
{% if init_system == "sysvinit" %}
# Logging directory
KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
{% else %}
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
# Apiserver Log level, 0 is debug
KUBE_LOG_LEVEL="{{ kube_log_level | default('--v=2') }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port={{kube_apiserver_insecure_port}} --secure-port={{ kube_apiserver_port }}"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for host in groups['etcd'] %}http://{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
# TLS CONFIGURATION
KUBE_TLS_CONFIG="--tls_cert_file={{ kube_cert_dir }}/apiserver.pem --tls_private_key_file={{ kube_cert_dir }}/apiserver-key.pem --client_ca_file={{ kube_cert_dir }}/ca.pem"
# Add your own!
KUBE_API_ARGS="--token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/apiserver-key.pem"
{% if init_system == "sysvinit" %}
DAEMON_ARGS="$KUBE_LOGGING $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBE_API_PORT $KUBE_SERVICE_ADDRESSES \
$KUBE_ETCD_SERVERS $KUBE_ADMISSION_CONTROL $KUBE_RUNTIME_CONFIG $KUBE_TLS_CONFIG $KUBE_API_ARGS"
{% endif %}

View File

@@ -0,0 +1,28 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd.service
After=etcd.service
[Service]
EnvironmentFile=/etc/kubernetes/kube-apiserver.env
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_RUNTIME_CONFIG \
$KUBE_TLS_CONFIG \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -4,8 +4,8 @@ current-context: kubectl-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority-data: {{ kube_ca_cert|b64encode }}
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
    certificate-authority-data: {{ kube_node_cert|b64encode }}
    server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
  name: {{ cluster_name }}
contexts:
- context:

View File

@@ -0,0 +1,52 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - apiserver
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
    - --service-cluster-ip-range={{ kube_service_addresses }}
    - --client-ca-file={{ kube_cert_dir }}/ca.pem
    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
    - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
    - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --secure-port={{ kube_apiserver_port }}
    - --insecure-port={{ kube_apiserver_insecure_port }}
{% if kube_api_runtime_config is defined %}
{% for conf in kube_api_runtime_config %}
    - --runtime-config={{ conf }}
{% endfor %}
{% endif %}
    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
    - --v={{ kube_log_level | default('2') }}
    - --allow-privileged=true
    ports:
    - containerPort: {{ kube_apiserver_port }}
      hostPort: {{ kube_apiserver_port }}
      name: https
    - containerPort: {{ kube_apiserver_insecure_port }}
      hostPort: {{ kube_apiserver_insecure_port }}
      name: local
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: kubernetes-config
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
    name: kubernetes-config
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:{{ kube_apiserver_insecure_port }}
    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --root-ca-file={{ kube_cert_dir }}/ca.pem
    - --v={{ kube_log_level | default('2') }}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 1
    volumeMounts:
    - mountPath: {{ kube_cert_dir }}
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: {{ kube_cert_dir }}
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

View File

@@ -0,0 +1,46 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-podmaster
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: scheduler-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --key=scheduler
    - --source-file={{ kube_config_dir }}/kube-scheduler.manifest
    - --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: manifest-src
      readOnly: true
    - mountPath: {{ kube_manifest_dir }}
      name: manifest-dst
  - name: controller-manager-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
    - --key=controller
    - --source-file={{ kube_config_dir }}/kube-controller-manager.manifest
    - --dest-file={{ kube_manifest_dir }}/kube-controller-manager.manifest
    terminationMessagePath: /dev/termination-log
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: manifest-src
      readOnly: true
    - mountPath: {{ kube_manifest_dir }}
      name: manifest-dst
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
    name: manifest-src
  - hostPath:
      path: {{ kube_manifest_dir }}
    name: manifest-dst

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:{{ kube_apiserver_insecure_port }}
    - --v={{ kube_log_level | default('2') }}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 1

View File

@@ -1,8 +0,0 @@
###
# kubernetes proxy config
# default config should be adequate
# Add your own!
[Service]
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: proxy-to-{{ cluster_name }}
preferences: {}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: proxy
  name: proxy-to-{{ cluster_name }}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['kube-master'][0] }}:{{ kube_master_insecure_port }}
  name: {{ cluster_name }}
users:
- name: proxy
  user:
    token: {{ proxy_token }}

View File

@@ -1,7 +0,0 @@
###
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: scheduler-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: scheduler
  name: scheduler-to-{{ cluster_name }}
users:
- name: scheduler
  user:
    token: {{ scheduler_token }}

View File

@@ -1,29 +0,0 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_RUNTIME_CONFIG \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,20 +0,0 @@
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
ExecStart={{ bin_dir }}/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,22 +0,0 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}
[Service]
EnvironmentFile=/etc/kubernetes/config
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,20 +0,0 @@
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart={{ bin_dir }}/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,6 @@
---
namespace_kubesystem:
  apiVersion: v1
  kind: Namespace
  metadata:
    name: kube-system

View File

@@ -12,7 +12,7 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_config_dir: /etc/kubernetes
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
@@ -24,21 +24,25 @@ kube_users_dir: "{{ kube_config_dir }}/users"
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
dns_domain: "{{ cluster_name }}"
kube_proxy_mode: iptables
kube_proxy_mode: userspace
# Temporary image, waiting for official google release
# hyperkube_image_repo: gcr.io/google_containers/hyperkube
hyperkube_image_repo: quay.io/ant31/kubernetes-hyperkube
hyperkube_image_tag: v1.1.4
# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 10th ip address in the kube_service_addresses range and use that.
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
# kube_api_runtime_config:
# - extensions/v1beta1/daemonsets=true
# - extensions/v1beta1/deployments=true
kube_api_runtime_config:
  - extensions/v1beta1/daemonsets=true
  - extensions/v1beta1/deployments=true

View File

@@ -19,7 +19,10 @@ token_file="${token_dir}/known_tokens.csv"
create_accounts=($@)
touch "${token_file}"
if [ ! -e "${token_file}" ]; then
  touch "${token_file}"
fi
for account in "${create_accounts[@]}"; do
  if grep ",${account}," "${token_file}" ; then
    continue

View File

@@ -0,0 +1,107 @@
#!/bin/bash
# Author: skahlouc@skahlouc-laptop
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
usage()
{
  cat << EOF
Create self-signed certificates

Usage : $(basename $0) -f <config> [-c <cloud_provider>] [-d <ssldir>] [-g <ssl_group>]
  -h | --help   : Show this message
  -f | --config : Openssl configuration file
  -c | --cloud  : Cloud provider (GCE, AWS or AZURE)
  -d | --ssldir : Directory where the certificates will be installed
  -g | --sslgrp : Group of the certificates

ex :
  $(basename $0) -f openssl.conf -c GCE -d /srv/ssl -g kube
EOF
}
# Options parsing
while (($#)); do
  case "$1" in
    -h | --help)   usage;          exit 0;;
    -f | --config) CONFIG=${2};    shift 2;;
    -c | --cloud)  CLOUD=${2};     shift 2;;
    -d | --ssldir) SSLDIR="${2}";  shift 2;;
    -g | --group)  SSLGRP="${2}";  shift 2;;
    *)
      usage
      echo "ERROR : Unknown option"
      exit 3
      ;;
  esac
done
if [ -z ${CONFIG} ]; then
  echo "ERROR: the openssl configuration file is missing. option -f"
  exit 1
fi
if [ -z ${SSLDIR} ]; then
  SSLDIR="/etc/kubernetes/certs"
fi
if [ -z ${SSLGRP} ]; then
  SSLGRP="kube-cert"
fi
#echo "config=$CONFIG, cloud=$CLOUD, certdir=$SSLDIR, certgroup=$SSLGRP"
SUPPORTED_CLOUDS="GCE AWS AZURE"
# TODO: Add support for discovery on other providers?
if [ "${CLOUD}" == "GCE" ]; then
CLOUD_IP=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "${CLOUD}" == "AWS" ]; then
CLOUD_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [ "${CLOUD}" == "AZURE" ]; then
CLOUD_IP=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
mkdir -p "${SSLDIR}"
# Root CA
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
# Apiserver
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
# Nodes and Admin
for i in node admin; do
  openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
  openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
  openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
done
# Install certs
mv *.pem ${SSLDIR}/
chgrp ${SSLGRP} ${SSLDIR}/*
chmod 600 ${SSLDIR}/*-key.pem
chown root:root ${SSLDIR}/*-key.pem

View File

@@ -1,32 +1,19 @@
---
- name: restart daemons
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-kubelet
    - restart reloaded-proxy

- name: reload systemd
  command: systemctl daemon-reload
  when: init_system == "systemd"

- name: restart kubelet
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-kubelet
    - reload kubelet
- name: restart reloaded-kubelet

- name: set is_gentoken_calico fact
  set_fact:
    is_gentoken_calico: true

- name: reload kubelet
  service:
    name: kubelet
    state: restarted

- name: restart proxy
  command: /bin/true
  notify:
    - reload systemd
    - restart reloaded-proxy

- name: restart reloaded-proxy
  service:
    name: kube-proxy
    state: restarted

View File

@@ -1,3 +0,0 @@
---
dependencies:
  - { role: kubernetes/common }

View File

@@ -1,53 +0,0 @@
---
- name: Get the node token values
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:kubelet"
    - "system:proxy"
  register: tokens
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Set token facts
  set_fact:
    kubelet_token: "{{ tokens.results[0].content|b64decode }}"
    proxy_token: "{{ tokens.results[1].content|b64decode }}"

- name: Create kubelet environment vars dir
  file: path=/etc/systemd/system/kubelet.service.d state=directory

- name: Write kubelet config file
  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes
  notify:
    - restart kubelet

- name: write the kubecfg (auth) file for kubelet
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig backup=yes
  notify:
    - restart kubelet

- name: Create proxy environment vars dir
  file: path=/etc/systemd/system/kube-proxy.service.d state=directory

- name: Write proxy config file
  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
  notify:
    - restart proxy

- name: write the kubecfg (auth) file for kube-proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
  notify:
    - restart proxy

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started

- name: Enable proxy
  service:
    name: kube-proxy
    enabled: yes
    state: started

View File

@@ -0,0 +1,27 @@
---
- name: tokens | copy the token gen script
  copy:
    src=kube-gen-token.sh
    dest={{ kube_script_dir }}
    mode=u+x
  when: inventory_hostname == groups['kube-master'][0]

- name: tokens | generate tokens for calico
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:calico" ]
    - "{{ groups['k8s-cluster'] }}"
  register: gentoken_calico
  changed_when: "'Added' in gentoken_calico.stdout"
  when: kube_network_plugin == "calico"
  delegate_to: "{{ groups['kube-master'][0] }}"
  notify: set is_gentoken_calico fact

- name: tokens | get the calico token values
  slurp:
    src: "{{ kube_token_dir }}/system:calico-{{ inventory_hostname }}.token"
  register: calico_token
  when: kube_network_plugin == "calico"
  delegate_to: "{{ groups['kube-master'][0] }}"

Some files were not shown because too many files have changed in this diff.