Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits (233 commits)
@@ -94,9 +94,11 @@ before_script:
|
||||
# Check out latest tag if testing upgrade
|
||||
# Uncomment when gitlab kargo repo has tags
|
||||
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout 72ae7638bcc94c66afa8620dfa4ad9a9249327ea
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
|
||||
# Checkout the CI vars file so it is available
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
|
||||
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
|
||||
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
|
||||
|
||||
|
||||
# Create cluster
|
||||
@@ -250,6 +252,10 @@ before_script:
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.rhel7_weave_variables: &rhel7_weave_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
@@ -422,6 +428,17 @@ centos-weave-kubeadm-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
ubuntu-contiv-sep:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_contiv_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
rhel7-weave:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
|
||||
@@ -54,11 +54,12 @@ Versions of supported components
|
||||
--------------------------------
|
||||
|
||||
|
||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.8.1 <br>
|
||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 <br>
|
||||
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
|
||||
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
|
||||
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
|
||||
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||
[contiv](https://github.com/contiv/install/releases) v1.0.3 <br>
|
||||
[weave](http://weave.works/) v2.0.1 <br>
|
||||
[docker](https://www.docker.com/) v1.13 (see note)<br>
|
||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
|
||||
@@ -93,6 +94,9 @@ You can choose between 4 network plugins. (default: `calico`, except Vagrant use
|
||||
|
||||
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||
|
||||
* [**contiv**](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
|
||||
apply firewall policies, segregate containers in multiple networks, and bridge pods onto physical networks.
|
||||
|
||||
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||
(Please refer to the `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html).) A plugin can be selected as shown in the example below.
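A hedged illustration of switching plugins: the `kube_network_plugin` variable (set in `inventory/group_vars/k8s-cluster.yml`, as shown later on this page) can also be overridden on the command line. The inventory path below is a placeholder.

```bash
# Sketch only: deploy with weave instead of the default calico.
# kube_network_plugin is the variable referenced later in this page; the
# inventory path is an assumption about your local layout.
ansible-playbook -i inventory/hosts cluster.yml -b -e kube_network_plugin=weave
```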
|
||||
|
||||
@@ -107,7 +111,7 @@ See also [Network checker](docs/netcheck.md).
|
||||
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||
|
||||
## Tools and projects on top of Kubespray
|
||||
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
|
||||
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
|
||||
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||
|
||||
3
Vagrantfile
vendored
@@ -129,6 +129,9 @@ Vagrant.configure("2") do |config|
|
||||
config.vm.provision "shell", inline: "service network restart", run: "always"
|
||||
end
|
||||
|
||||
# Disable swap for each vm
|
||||
config.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
# Only execute once the Ansible provisioner,
|
||||
# when all the machines are up and ready.
|
||||
if i == $num_instances
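The new `swapoff -a` provisioner above disables swap inside each VM, which recent kubelet versions expect. A hedged way to verify it on a running box (the machine name is a placeholder based on this Vagrantfile's naming scheme):

```bash
# The Swap line should report 0 total/used once provisioning has run.
vagrant ssh k8s-01 -c "free -m | grep -i swap"
```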
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
[ssh_connection]
|
||||
pipelining=True
|
||||
ansible_ssh_common_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||
#ansible_ssh_common_args = -F {{ inventory_dir|quote }}/ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
host_key_checking=False
|
||||
@@ -11,4 +10,5 @@ fact_caching_connection = /tmp
|
||||
stdout_callback = skippy
|
||||
library = ./library
|
||||
callback_whitelist = profile_tasks
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
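The extra `/usr/share/kubespray/roles` entry widens the role search path (it appears to match the packaged install location used by the RPM spec later in this changeset). A hedged way to confirm which value is in effect, assuming Ansible 2.4+ where `ansible-config` is available:

```bash
# Print the effective, non-default settings for this ansible.cfg and pick out the roles path.
ansible-config dump --only-changed | grep -i roles_path
```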
|
||||
|
||||
18
cluster.yml
@@ -32,12 +32,14 @@
|
||||
tags: rkt
|
||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||
- { role: download, tags: download, skip_downloads: false }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: etcd
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
@@ -45,23 +47,25 @@
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
- hosts: k8s-cluster:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/node, tags: node }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
@@ -78,11 +82,16 @@
|
||||
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
||||
- { role: network_plugin, tags: network }
|
||||
|
||||
- hosts: kube-master
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
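Since the token-rotation play above now runs only on `kube-master[0]` and is gated on `secret_changed`, a hedged way to trigger just that step is via its tag; the inventory path is a placeholder:

```bash
# Re-run only the rotate_tokens play; the tag and variable names are taken from the play above.
ansible-playbook -i inventory/hosts cluster.yml -b --tags rotate_tokens -e secret_changed=true
```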
|
||||
|
||||
@@ -98,6 +107,7 @@
|
||||
- { role: kubespray-defaults}
|
||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
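The plays above now also target a `calico-rr` group (Calico route reflectors). As a hedged sketch, such a group could be added to the Ansible inventory like this; the host name, address, and inventory file path are placeholders:

```bash
# Append a hypothetical route-reflector group to a local inventory file.
cat >> inventory/inventory.cfg <<'EOF'

[calico-rr]
rr0 ansible_host=10.0.0.10
EOF
```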
|
||||
|
||||
@@ -1,58 +1,3 @@
|
||||
## Kubernetes Community Code of Conduct
|
||||
# Kubernetes Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### Kubernetes Events Code of Conduct
|
||||
|
||||
Kubernetes events are working conferences intended for professional networking and collaboration in the
|
||||
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
|
||||
with their employer's policies on appropriate workplace behavior.
|
||||
|
||||
While at Kubernetes events or related social networking opportunities, attendees should not engage in
|
||||
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
|
||||
be especially aware of these concerns.
|
||||
|
||||
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
|
||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
||||
be engaging in discriminatory or offensive speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of Kubernetes event staff.
|
||||
|
||||
|
||||
[]()
|
||||
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
%global srcname ansible_kubespray
|
||||
|
||||
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||
|
||||
Name: ansible-kubespray
|
||||
Version: XXX
|
||||
Release: XXX
|
||||
Summary: Ansible modules for installing Kubernetes
|
||||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Vendor: Kubespray <smainklh@gmail.com>
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
BuildRequires: python2-devel
|
||||
BuildRequires: python-setuptools
|
||||
BuildRequires: python-d2to1
|
||||
BuildRequires: python-pbr
|
||||
|
||||
Requires: ansible
|
||||
Requires: python-jinja2
|
||||
Requires: python-netaddr
|
||||
|
||||
%description
|
||||
|
||||
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||
installing a Kubernetes cluster. If you have questions, join us
|
||||
on the https://slack.k8s.io, channel '#kubespray'.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{upstream_version} -S git
|
||||
|
||||
|
||||
%build
|
||||
%{__python2} setup.py build
|
||||
|
||||
|
||||
%install
|
||||
export PBR_VERSION=%{version}
|
||||
export SKIP_PIP_INSTALL=1
|
||||
%{__python2} setup.py install --skip-build --root %{buildroot}
|
||||
|
||||
|
||||
%files
|
||||
%doc README.md
|
||||
%doc inventory/inventory.example
|
||||
%config /etc/kubespray/ansible.cfg
|
||||
%config /etc/kubespray/inventory/group_vars/all.yml
|
||||
%config /etc/kubespray/inventory/group_vars/k8s-cluster.yml
|
||||
%license LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info
|
||||
/usr/local/share/kubespray/roles/
|
||||
/usr/local/share/kubespray/playbooks/
|
||||
%defattr(-,root,root)
|
||||
|
||||
|
||||
%changelog
|
||||
61
contrib/packaging/rpm/kubespray.spec
Normal file
@@ -0,0 +1,61 @@
|
||||
%global srcname kubespray
|
||||
|
||||
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||
|
||||
Name: kubespray
|
||||
Version: master
|
||||
Release: %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
|
||||
Summary: Ansible modules for installing Kubernetes
|
||||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
BuildRequires: python2
|
||||
BuildRequires: python2-devel
|
||||
BuildRequires: python2-setuptools
|
||||
BuildRequires: python-d2to1
|
||||
BuildRequires: python2-pbr
|
||||
|
||||
Requires: ansible
|
||||
Requires: python-jinja2 >= 2.10
|
||||
Requires: python-netaddr
|
||||
|
||||
%description
|
||||
|
||||
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||
installing a Kubernetes cluster. If you have questions, join us
|
||||
on the https://slack.k8s.io, channel '#kubespray'.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{upstream_version} -S git
|
||||
|
||||
|
||||
%build
|
||||
export PBR_VERSION=%{release}
|
||||
%{__python2} setup.py build bdist_rpm
|
||||
|
||||
|
||||
%install
|
||||
export PBR_VERSION=%{release}
|
||||
export SKIP_PIP_INSTALL=1
|
||||
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm
|
||||
|
||||
|
||||
%files
|
||||
%doc %{_docdir}/%{name}/README.md
|
||||
%doc %{_docdir}/%{name}/inventory/inventory.example
|
||||
%config %{_sysconfdir}/%{name}/ansible.cfg
|
||||
%config %{_sysconfdir}/%{name}/inventory/group_vars/all.yml
|
||||
%config %{_sysconfdir}/%{name}/inventory/group_vars/k8s-cluster.yml
|
||||
%license %{_docdir}/%{name}/LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
|
||||
%{_datarootdir}/%{name}/roles/
|
||||
%{_datarootdir}/%{name}/playbooks/
|
||||
%defattr(-,root,root)
|
||||
|
||||
|
||||
%changelog
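A minimal, hedged sketch of building a package from this spec on a RHEL/CentOS host with the listed BuildRequires installed. The tarball name only approximates what the `Source0` URL would provide, so treat it as an assumption rather than the project's official build process.

```bash
# Create a source tarball and build the RPM from the packaged spec (sketch only).
mkdir -p ~/rpmbuild/SOURCES
git archive --format=tar.gz --prefix=kubespray-master/ \
  -o ~/rpmbuild/SOURCES/kubespray-master.tar.gz HEAD   # file name is an assumption
rpmbuild -ba contrib/packaging/rpm/kubespray.spec
```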
|
||||
@@ -24,7 +24,7 @@ export AWS_DEFAULT_REGION="zzz"
|
||||
```
|
||||
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
|
||||
|
||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data
|
||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as the base image. If you want to change this behaviour, see the note "Using other distributions than CoreOS" below.
|
||||
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
|
||||
- Create an AWS EC2 SSH Key
|
||||
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials (a combined sketch follows this list)
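A hedged end-to-end sketch of the list above, assuming the credentials and tfvars files were prepared as described; the `terraform init` step is an assumption for newer Terraform releases.

```bash
cd contrib/terraform/aws
terraform init                                 # assumption: required on recent Terraform versions
terraform apply -var-file=credentials.tfvars
# Deploy Kubernetes with the inventory Terraform generated (CoreOS example, as below):
cd ../../..
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```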
|
||||
@@ -36,17 +36,72 @@ terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_addres
|
||||
|
||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh through the bastion host, use the generated `ssh-bastion.conf`.
|
||||
Ansible automatically detects the bastion and adjusts `ssh_args` accordingly:
|
||||
```commandline
|
||||
ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
||||
ssh -F ./ssh-bastion.conf user@$ip
|
||||
```
|
||||
|
||||
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
|
||||
|
||||
Example (this one assumes you are using CoreOS)
|
||||
```commandline
|
||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
||||
```
|
||||
***Using other distributions than CoreOS***
|
||||
If you want to use a distribution other than CoreOS, modify the search filters of the `data "aws_ami" "distro"` block in variables.tf.
|
||||
|
||||
For example, to use:
|
||||
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["debian-jessie-amd64-hvm-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["379101102735"]
|
||||
}
|
||||
|
||||
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["099720109477"]
|
||||
}
|
||||
|
||||
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["dcos-centos7-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["688023202711"]
|
||||
}
|
||||
|
||||
**Troubleshooting**
|
||||
|
||||
|
||||
@@ -8,6 +8,8 @@ provider "aws" {
|
||||
region = "${var.AWS_DEFAULT_REGION}"
|
||||
}
|
||||
|
||||
data "aws_availability_zones" "available" {}
|
||||
|
||||
/*
|
||||
* Calling modules who create the initial AWS VPC / AWS ELB
|
||||
* and AWS IAM Roles for Kubernetes Deployment
|
||||
@@ -18,7 +20,7 @@ module "aws-vpc" {
|
||||
|
||||
aws_cluster_name = "${var.aws_cluster_name}"
|
||||
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
||||
aws_avail_zones="${var.aws_avail_zones}"
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
||||
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
||||
default_tags="${var.default_tags}"
|
||||
@@ -31,7 +33,7 @@ module "aws-elb" {
|
||||
|
||||
aws_cluster_name="${var.aws_cluster_name}"
|
||||
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
|
||||
aws_avail_zones="${var.aws_avail_zones}"
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
||||
aws_elb_api_port = "${var.aws_elb_api_port}"
|
||||
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
||||
@@ -49,12 +51,13 @@ module "aws-iam" {
|
||||
* Create Bastion Instances in AWS
|
||||
*
|
||||
*/
|
||||
|
||||
resource "aws_instance" "bastion-server" {
|
||||
ami = "${var.aws_bastion_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_bastion_size}"
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
associate_public_ip_address = true
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
|
||||
|
||||
|
||||
@@ -76,13 +79,13 @@ resource "aws_instance" "bastion-server" {
|
||||
*/
|
||||
|
||||
resource "aws_instance" "k8s-master" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_master_size}"
|
||||
|
||||
count = "${var.aws_kube_master_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
@@ -95,7 +98,7 @@ resource "aws_instance" "k8s-master" {
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "master"
|
||||
))}"
|
||||
}
|
||||
@@ -108,13 +111,13 @@ resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-etcd" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_etcd_size}"
|
||||
|
||||
count = "${var.aws_etcd_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
@@ -124,7 +127,7 @@ resource "aws_instance" "k8s-etcd" {
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "etcd"
|
||||
))}"
|
||||
|
||||
@@ -132,12 +135,12 @@ resource "aws_instance" "k8s-etcd" {
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-worker" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_worker_size}"
|
||||
|
||||
count = "${var.aws_kube_worker_num}"
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
@@ -148,7 +151,7 @@ resource "aws_instance" "k8s-worker" {
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "worker"
|
||||
))}"
|
||||
|
||||
@@ -162,7 +165,7 @@ resource "aws_instance" "k8s-worker" {
|
||||
*/
|
||||
data "template_file" "inventory" {
|
||||
template = "${file("${path.module}/templates/inventory.tpl")}"
|
||||
|
||||
|
||||
vars {
|
||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||
@@ -172,8 +175,6 @@ data "template_file" "inventory" {
|
||||
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
||||
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
|
||||
loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ resource "aws_elb" "aws-elb-api" {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
timeout = 3
|
||||
target = "HTTP:8080/"
|
||||
target = "TCP:${var.k8s_secure_api_port}"
|
||||
interval = 30
|
||||
}
|
||||
|
||||
|
||||
@@ -34,7 +34,8 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,6 @@ output "aws_security_group" {
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${default_tags}"
|
||||
value = "${var.default_tags}"
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,5 +24,5 @@ output "inventory" {
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${default_tags}"
|
||||
}
|
||||
value = "${var.default_tags}"
|
||||
}
|
||||
|
||||
@@ -25,5 +25,3 @@ kube-master
|
||||
|
||||
[k8s-cluster:vars]
|
||||
${elb_api_fqdn}
|
||||
${elb_api_port}
|
||||
${loadbalancer_apiserver_address}
|
||||
|
||||
@@ -5,10 +5,8 @@ aws_cluster_name = "devtest"
|
||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||
aws_avail_zones = ["us-west-2a","us-west-2b"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_ami = "ami-db56b9a3"
|
||||
aws_bastion_size = "t2.medium"
|
||||
|
||||
|
||||
@@ -23,8 +21,6 @@ aws_etcd_size = "t2.medium"
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
aws_cluster_ami = "ami-db56b9a3"
|
||||
|
||||
#Settings AWS ELB
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
|
||||
@@ -20,6 +20,21 @@ variable "aws_cluster_name" {
|
||||
description = "Name of AWS Cluster"
|
||||
}
|
||||
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["CoreOS-stable-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["595879546273"] #CoreOS
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
|
||||
@@ -27,11 +42,6 @@ variable "aws_vpc_cidr_block" {
|
||||
description = "CIDR Block for VPC"
|
||||
}
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "Availability Zones Used"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability Zones"
|
||||
type = "list"
|
||||
@@ -44,10 +54,6 @@ variable "aws_cidr_subnets_public" {
|
||||
|
||||
//AWS EC2 Settings
|
||||
|
||||
variable "aws_bastion_ami" {
|
||||
description = "AMI ID for Bastion Host in chosen AWS Region"
|
||||
}
|
||||
|
||||
variable "aws_bastion_size" {
|
||||
description = "EC2 Instance Size of Bastion Host"
|
||||
}
|
||||
@@ -81,9 +87,6 @@ variable "aws_kube_worker_size" {
|
||||
description = "Instance size of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
variable "aws_cluster_ami" {
|
||||
description = "AMI ID for Kubernetes Cluster"
|
||||
}
|
||||
/*
|
||||
* AWS ELB Settings
|
||||
*
|
||||
@@ -96,10 +99,6 @@ variable "k8s_secure_api_port" {
|
||||
description = "Secure Port of K8S API Server"
|
||||
}
|
||||
|
||||
variable "loadbalancer_apiserver_address" {
|
||||
description= "Bind Address for ELB of K8s API Server"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
|
||||
@@ -5,65 +5,91 @@ Openstack.
|
||||
|
||||
## Status
|
||||
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on a
|
||||
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
|
||||
services.
|
||||
This will install a Kubernetes cluster on an OpenStack cloud. It should work on
|
||||
most modern installs of OpenStack that support the basic services.
|
||||
|
||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
||||
## Approach
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||
There is a [python script](../terraform.py) that reads the generated `.tfstate`
|
||||
file to generate a dynamic inventory that is consumed by the main ansible script
|
||||
to actually install kubernetes and stand up the cluster.
|
||||
|
||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You need currently at least 1 floating ip, which needs to be used on a master. If using more than one, at least one should be on a master for bastions to work fine.
|
||||
* you already have a suitable OS image in glance
|
||||
* you already have both an internal network and a floating-ip pool created
|
||||
* you have security-groups enabled
|
||||
### Networking
|
||||
The configuration includes creating a private subnet with a router to the
|
||||
external net. It will allocate floating-ips from a pool and assign them to the
|
||||
hosts where that makes sense. You have the option of creating bastion hosts
|
||||
inside the private subnet to access the nodes there.
|
||||
|
||||
### Kubernetes Nodes
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts. For each class there are options for allocating
|
||||
floating ip addresses or not.
|
||||
- Master Nodes with etcd
|
||||
- Master nodes without etcd
|
||||
- Standalone etcd hosts
|
||||
- Kubernetes worker nodes
|
||||
|
||||
Note that the ansible script will report an error if you wind up with an even
number of etcd instances, since that is not a valid configuration.
|
||||
|
||||
### Gluster FS
|
||||
The terraform configuration supports provisioning of an optional GlusterFS
|
||||
shared file system based on a separate set of VMs. To enable this, you need to
|
||||
specify
|
||||
- the number of gluster hosts
|
||||
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||
- Other properties related to provisioning the hosts
|
||||
|
||||
Even if you are using Container Linux by CoreOS for your cluster, you will still
need the GlusterFS VMs to be based on either Debian or RedHat based images;
Container Linux by CoreOS cannot serve GlusterFS, but it can connect to it through
binaries available in hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
## Requirements
|
||||
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||
- you already have a suitable OS image in glance
|
||||
- you already have a floating-ip pool created
|
||||
- you have security-groups enabled
|
||||
- you have a pair of keys generated that can be used to secure the new hosts
|
||||
|
||||
## Module Architecture
|
||||
The configuration is divided into three modules:
|
||||
- Network
|
||||
- IPs
|
||||
- Compute
|
||||
|
||||
The main reason for splitting the configuration up in this way is to easily
|
||||
accommodate situations where floating IPs are limited by a quota or if you have
|
||||
any external references to the floating IP (e.g. DNS) that would otherwise have
|
||||
to be updated.
|
||||
|
||||
You can force your existing IPs by modifying the compute variables in
|
||||
`kubespray.tf` as
|
||||
|
||||
```
|
||||
k8s_master_fips = ["151.101.129.67"]
|
||||
k8s_node_fips = ["151.101.129.68"]
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
|
||||
requirements.
|
||||
Terraform will be used to provision all of the OpenStack resources. It is also
|
||||
used to deploy and provision the software requirements.
|
||||
|
||||
### Prep
|
||||
|
||||
#### OpenStack
|
||||
|
||||
Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
|
||||
Ensure your OpenStack **Identity v2** credentials are loaded in environment
|
||||
variables. This can be done by downloading a credentials .rc file from your
|
||||
OpenStack dashboard and sourcing it:
|
||||
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
```
|
||||
|
||||
> You must also set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI.
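A small, hedged illustration of the note above; the region and tenant values are placeholders that must be replaced with your own.

```bash
source ~/.stackrc
export OS_REGION_NAME="RegionOne"                              # assumption: substitute your region
export OS_TENANT_ID="00000000-0000-0000-0000-000000000000"     # placeholder tenant UUID
```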
|
||||
|
||||
You will need two networks before installing, an internal network and
|
||||
an external (floating IP pool) network. The internal network can be shared, as
|
||||
we use security groups to provide network segregation. Due to the many
|
||||
differences between OpenStack installs, the Terraform configuration does not attempt to create
|
||||
these for you.
|
||||
|
||||
By default Terraform will expect that your networks are called `internal` and
|
||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
|
||||
|
||||
A full list of variables you can change can be found at [variables.tf](variables.tf).
|
||||
|
||||
All OpenStack resources will use the Terraform variable `cluster_name`
(default `example`) in their name to make it easier to track. For example, the
|
||||
first compute resource will be named `example-kubernetes-1`.
|
||||
|
||||
#### Terraform
|
||||
|
||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||
step is required by the terraform provisioner:
|
||||
|
||||
```
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
|
||||
Ensure that you have your Openstack credentials loaded into Terraform
|
||||
environment variables. Likely via a command similar to:
|
||||
|
||||
@@ -75,61 +101,106 @@ $ echo Setting up Terraform creds && \
|
||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||
```
|
||||
|
||||
##### Alternative: etcd inside masters
|
||||
### Terraform Variables
|
||||
The construction of the cluster is driven by values found in
|
||||
[variables.tf](variables.tf).
|
||||
|
||||
If you want to provision master or node VMs that don't use floating ips and where etcd is inside masters, write on a `my-terraform-vars.tfvars` file, for example:
|
||||
The best way to set these values is to create a file in the project's root
|
||||
directory called something like `my-terraform-vars.tfvars` (see the sketch after the table below). Many of the
|
||||
variables are obvious. Here is a summary of some of the more interesting
|
||||
ones:
|
||||
|
||||
```
|
||||
number_of_k8s_masters = "1"
|
||||
number_of_k8s_masters_no_floating_ip = "2"
|
||||
number_of_k8s_nodes_no_floating_ip = "1"
|
||||
number_of_k8s_nodes = "0"
|
||||
```
|
||||
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
|
||||
|Variable | Description |
|
||||
|---------|-------------|
|
||||
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
|
||||
|`network_name` | The name to be given to the internal network that will be generated |
|
||||
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|
||||
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|
||||
|`external_net` | UUID of the external network that will be routed to |
|
||||
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`,`flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation; you can get available flavor IDs through `nova flavor-list` |
|
||||
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|
||||
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|
||||
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|
||||
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
|
||||
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|
||||
|`number_of_etcd` | Number of pure etcd nodes |
|
||||
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. |
|
||||
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|
||||
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
|
||||
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
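As a hedged sketch tying the table together, a `my-terraform-vars.tfvars` file might look like the following; every value is an example only, and the image, flavors and networks must already exist in your cloud (written from the shell here purely for brevity):

```bash
# Create an example variables file in the project's root directory.
cat > my-terraform-vars.tfvars <<'EOF'
cluster_name          = "example"
network_name          = "internal"
floatingip_pool       = "external"
public_key_path       = "~/.ssh/id_rsa.pub"
image                 = "CoreOS"
ssh_user              = "core"
number_of_k8s_masters = "1"
number_of_k8s_nodes   = "2"
EOF
```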
|
||||
|
||||
##### Alternative: etcd on separate machines
|
||||
## Initializing Terraform
|
||||
Before Terraform can operate on your cluster you need to install required
|
||||
plugins. This is accomplished with the command
|
||||
|
||||
If you want to provision master or node VMs that don't use floating ips and where **etcd is on separate nodes from Kubernetes masters**, write on a `my-terraform-vars.tfvars` file, for example:
|
||||
|
||||
```
|
||||
number_of_etcd = "3"
|
||||
number_of_k8s_masters = "0"
|
||||
number_of_k8s_masters_no_etcd = "1"
|
||||
number_of_k8s_masters_no_floating_ip = "0"
|
||||
number_of_k8s_masters_no_floating_ip_no_etcd = "2"
|
||||
number_of_k8s_nodes_no_floating_ip = "1"
|
||||
number_of_k8s_nodes = "2"
|
||||
|
||||
flavor_k8s_node = "desired-flavor-id"
|
||||
flavor_k8s_master = "desired-flavor-id"
|
||||
flavor_etcd = "desired-flavor-id"
|
||||
```bash
|
||||
$ terraform init contrib/terraform/openstack
|
||||
```
|
||||
|
||||
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy), two VMs as nodes with floating ips, one VM as node without floating ip and three VMs for etcd.
|
||||
|
||||
##### Alternative: add GlusterFS
|
||||
|
||||
Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
|
||||
|
||||
```
|
||||
# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
|
||||
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
||||
# This is the name of an image already available in your openstack installation.
|
||||
image_gfs = "Ubuntu 15.10"
|
||||
number_of_gfs_nodes_no_floating_ip = "3"
|
||||
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
|
||||
gfs_volume_size_in_gb = "50"
|
||||
# The user needed for the image choosen for GlusterFS.
|
||||
ssh_user_gfs = "ubuntu"
|
||||
## Provisioning Cluster with Terraform
|
||||
You can apply the terraform config to your cluster with the following command
|
||||
issued from the project's root directory
|
||||
```bash
|
||||
$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
If you choose to create a bastion host, this script will create
|
||||
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to
|
||||
be able to access your machines by tunneling through the bastion's ip address. If
|
||||
you want to manually handle the ssh tunneling to these machines, please delete
|
||||
or move that file. If you want to use this, just leave it there, as ansible will
|
||||
pick it up automatically.
|
||||
|
||||
GlusterFS is not deployed by the standard `cluster.yml` playbook, see the [glusterfs playbook documentation](../../network-storage/glusterfs/README.md) for instructions.
|
||||
|
||||
# Configure Cluster variables
|
||||
## Destroying Cluster with Terraform
|
||||
You can destroy a config deployed to your cluster with the following command
|
||||
issued from the project's root directory
|
||||
```bash
|
||||
$ terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
Edit `inventory/group_vars/all.yml`:
|
||||
## Debugging Cluster Provisioning
|
||||
You can enable debugging output from Terraform by setting
|
||||
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the terraform command.
|
||||
|
||||
|
||||
# Running the Ansible Script
|
||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||
step is required by the terraform provisioner:
|
||||
|
||||
```
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
|
||||
Make sure you can connect to the hosts:
|
||||
|
||||
```
|
||||
$ ansible -i contrib/terraform/openstack/hosts -m ping all
|
||||
example-k8s_node-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
example-etcd-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
example-k8s-master-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
```
|
||||
|
||||
If you are deploying a system that needs bootstrapping, like Container Linux by
CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not
having python. As long as the state is not `UNREACHABLE`, this is fine.
|
||||
|
||||
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
|
||||
|
||||
## Configure Cluster variables
|
||||
|
||||
Edit `inventory/group_vars/all.yml`:
|
||||
- Set the variable **bootstrap_os** according to the selected image
|
||||
```
|
||||
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
||||
@@ -147,7 +218,7 @@ bin_dir: /opt/bin
|
||||
```
|
||||
cloud_provider: openstack
|
||||
```
|
||||
Edit `inventory/group_vars/k8s-cluster.yml`:
|
||||
Edit `inventory/group_vars/k8s-cluster.yml`:
|
||||
- Set the variable **kube_network_plugin** according to the selected networking plugin
|
||||
```
|
||||
# Choose network plugin (calico, weave or flannel)
|
||||
@@ -168,63 +239,13 @@ resolvconf_mode: host_resolvconf
|
||||
|
||||
For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
|
||||
|
||||
# Provision a Kubernetes Cluster on OpenStack
|
||||
|
||||
If not using a tfvars file for your setup, then execute:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
|
||||
openstack_compute_secgroup_v2.k8s_master: Creating...
|
||||
description: "" => "example - Kubernetes Master"
|
||||
name: "" => "example-k8s-master"
|
||||
rule.#: "" => "<computed>"
|
||||
...
|
||||
...
|
||||
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.
|
||||
|
||||
The state of your infrastructure has been saved to the path
|
||||
below. This state is required to modify and destroy your
|
||||
infrastructure, so keep it safe. To inspect the complete state
|
||||
use the `terraform show` command.
|
||||
|
||||
State path: contrib/terraform/openstack/terraform.tfstate
|
||||
```
|
||||
|
||||
Alternatively, if you wrote your terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If you choose to add masters or nodes without floating ips (only internal ips on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to be able to access your machines tunneling through the first floating ip used. If you want to manually handle the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as ansible will pick it up automatically.
|
||||
|
||||
Make sure you can connect to the hosts:
|
||||
|
||||
```
|
||||
$ ansible -i contrib/terraform/openstack/hosts -m ping all
|
||||
example-k8s_node-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
example-etcd-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
example-k8s-master-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
```
|
||||
|
||||
if you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
|
||||
|
||||
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
|
||||
|
||||
Deploy kubernetes:
|
||||
## Deploy kubernetes:
|
||||
|
||||
```
|
||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
||||
```
|
||||
|
||||
# Set up local kubectl
|
||||
## Set up local kubectl
|
||||
1. Install kubectl on your workstation:
|
||||
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||
2. Add route to internal IP of master node (if needed):
|
||||
@@ -245,16 +266,15 @@ ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||
```
|
||||
5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443
|
||||
6. Configure kubectl:
|
||||
5. Configure kubectl:
|
||||
```
|
||||
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||
--certificate-authority=ca.pem
|
||||
--certificate-authority=ca.pem
|
||||
|
||||
kubectl config set-credentials default-admin \
|
||||
--certificate-authority=ca.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--client-certificate=admin.pem
|
||||
--client-certificate=admin.pem
|
||||
|
||||
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||
kubectl config use-context default-system
|
||||
@@ -264,19 +284,24 @@ kubectl config use-context default-system
|
||||
kubectl version
|
||||
```
|
||||
|
||||
If you are using floating ip addresses then you may get this error:
|
||||
```
|
||||
Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
|
||||
```
|
||||
|
||||
You can tell kubectl to ignore this condition by adding the
|
||||
`--insecure-skip-tls-verify` option.
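For example (hedged; use this only while testing, since it disables certificate validation):

```bash
kubectl --insecure-skip-tls-verify=true version
```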
|
||||
|
||||
## GlusterFS
|
||||
GlusterFS is not deployed by the standard`cluster.yml` playbook, see the
|
||||
[glusterfs playbook documentation](../../network-storage/glusterfs/README.md)
|
||||
for instructions.
|
||||
|
||||
Basically, you will install gluster as follows:
|
||||
```bash
|
||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
|
||||
# What's next
|
||||
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
|
||||
|
||||
# Clean up
|
||||
|
||||
```
|
||||
$ terraform destroy
|
||||
Do you really want to destroy?
|
||||
Terraform will delete all your managed infrastructure.
|
||||
There is no undo. Only 'yes' will be accepted to confirm.
|
||||
|
||||
Enter a value: yes
|
||||
...
|
||||
...
|
||||
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
|
||||
```
|
||||
|
||||
@@ -1,226 +1,55 @@
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
module "network" {
|
||||
source = "modules/network"
|
||||
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${file(var.public_key_path)}"
|
||||
module "ips" {
|
||||
source = "modules/ips"
|
||||
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
floatingip_pool = "${var.floatingip_pool}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
router_id = "${module.network.router_id}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
module "compute" {
|
||||
source = "modules/compute"
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_etcd = "${var.number_of_etcd}"
|
||||
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
|
||||
public_key_path = "${var.public_key_path}"
|
||||
image = "${var.image}"
|
||||
image_gfs = "${var.image_gfs}"
|
||||
ssh_user = "${var.ssh_user}"
|
||||
ssh_user_gfs = "${var.ssh_user_gfs}"
|
||||
flavor_k8s_master = "${var.flavor_k8s_master}"
|
||||
flavor_k8s_node = "${var.flavor_k8s_node}"
|
||||
flavor_etcd = "${var.flavor_etcd}"
|
||||
flavor_gfs_node = "${var.flavor_gfs_node}"
|
||||
network_name = "${var.network_name}"
|
||||
flavor_bastion = "${var.flavor_bastion}"
|
||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||
bastion_fips = "${module.ips.bastion_fips}"
|
||||
|
||||
network_id = "${module.network.router_id}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index + var.number_of_k8s_masters)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,vault"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage"
|
||||
}
|
||||
volume {
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#output "msg" {
|
||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||
#}
|
||||
|
||||
contrib/terraform/openstack/modules/compute/main.tf (new file)
@@ -0,0 +1,280 @@
|
||||
|
||||
|
||||
variable user_data {
|
||||
type = "string"
|
||||
default = <<EOF
|
||||
#cloud-config
|
||||
manage_etc_hosts: localhost
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
EOF
|
||||
}
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${chomp(file(var.public_key_path))}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "6443"
|
||||
to_port = "6443"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion"
|
||||
description = "${var.cluster_name} - Bastion Server"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||
count = "${var.number_of_bastions}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_bastion}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
floating_ip = "${var.bastion_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "#cloud-config\nmanage_etc_hosts: localhost\npackage_update: true\npackage_upgrade: true"
|
||||
}
|
||||
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/compute/variables.tf (new file)
@@ -0,0 +1,83 @@
|
||||
variable "cluster_name" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
}
|
||||
|
||||
variable "number_of_gfs_nodes_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "gfs_volume_size_in_gb" {
|
||||
}
|
||||
|
||||
variable "public_key_path" {
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
}
|
||||
|
||||
variable "image_gfs" {
|
||||
}
|
||||
|
||||
variable "ssh_user" {
|
||||
}
|
||||
|
||||
variable "ssh_user_gfs" {
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
}
|
||||
|
||||
variable "flavor_etcd" {
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "flavor_bastion" {
|
||||
}
|
||||
|
||||
|
||||
variable "network_id"{
|
||||
|
||||
}
|
||||
|
||||
|
||||
variable "k8s_master_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "k8s_node_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "bastion_fips" {
|
||||
type = "list"
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/main.tf (new file)
@@ -0,0 +1,24 @@
|
||||
|
||||
resource "null_resource" "dummy_dependency" {
|
||||
triggers {
|
||||
dependency_id = "${var.router_id}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/outputs.tf (new file)
@@ -0,0 +1,11 @@
|
||||
output "k8s_master_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/variables.tf (new file)
@@ -0,0 +1,26 @@
|
||||
variable "number_of_k8s_masters" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
|
||||
}
|
||||
|
||||
variable "external_net" {
|
||||
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "router_id"{
|
||||
|
||||
}
|
||||
contrib/terraform/openstack/modules/network/main.tf (new file)
@@ -0,0 +1,24 @@
|
||||
|
||||
resource "openstack_networking_router_v2" "k8s" {
|
||||
name = "${var.cluster_name}-router"
|
||||
admin_state_up = "true"
|
||||
external_gateway = "${var.external_net}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_network_v2" "k8s" {
|
||||
name = "${var.network_name}"
|
||||
admin_state_up = "true"
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "k8s" {
|
||||
name = "${var.cluster_name}-internal-network"
|
||||
network_id = "${openstack_networking_network_v2.k8s.id}"
|
||||
cidr = "10.0.0.0/24"
|
||||
ip_version = 4
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "k8s" {
|
||||
router_id = "${openstack_networking_router_v2.k8s.id}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/network/outputs.tf (new file)
@@ -0,0 +1,7 @@
|
||||
output "router_id" {
|
||||
value = "${openstack_networking_router_interface_v2.k8s.id}"
|
||||
}
|
||||
|
||||
output "network_id" {
|
||||
value = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/network/variables.tf (new file)
@@ -0,0 +1,13 @@
|
||||
variable "external_net" {
|
||||
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
}
|
||||
|
||||
variable "dns_nameservers"{
|
||||
type = "list"
|
||||
}
|
||||
@@ -2,6 +2,10 @@ variable "cluster_name" {
|
||||
default = "example"
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
default = 2
|
||||
}
|
||||
@@ -63,19 +67,28 @@ variable "ssh_user_gfs" {
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "flavor_bastion" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_etcd" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
@@ -84,11 +97,21 @@ variable "network_name" {
|
||||
default = "internal"
|
||||
}
|
||||
|
||||
variable "dns_nameservers"{
|
||||
description = "An array of DNS name server names used by hosts in this subnet."
|
||||
type = "list"
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
description = "name of the floating ip pool to use"
|
||||
default = "external"
|
||||
}
|
||||
|
||||
variable "external_net" {
|
||||
description = "uuid of the external/public network"
|
||||
}
|
||||
|
||||
variable "username" {
|
||||
description = "Your openstack username"
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# Copyright 2015 Cisco Systems, Inc.
|
||||
#
|
||||
@@ -70,6 +70,14 @@ def iterhosts(resources):
|
||||
yield parser(resource, module_name)
|
||||
|
||||
|
||||
def iterips(resources):
|
||||
'''yield ip tuples of (instance_id, ip)'''
|
||||
for module_name, key, resource in resources:
|
||||
resource_type, name = key.split('.', 1)
|
||||
if resource_type == 'openstack_compute_floatingip_associate_v2':
|
||||
yield openstack_floating_ips(resource)
|
||||
|
||||
|
||||
def parses(prefix):
|
||||
def inner(func):
|
||||
PARSERS[prefix] = func
|
||||
@@ -298,6 +306,17 @@ def softlayer_host(resource, module_name):
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
attrs = {
|
||||
'ip': raw_attrs['floating_ip'],
|
||||
'instance_id': raw_attrs['instance_id'],
|
||||
}
|
||||
return attrs
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
return raw_attrs['instance_id'], raw_attrs['floating_ip']
|
||||
|
||||
@parses('openstack_compute_instance_v2')
|
||||
@calculate_mantl_vars
|
||||
@@ -343,6 +362,8 @@ def openstack_host(resource, module_name):
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||
|
||||
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'metadata.ssh_user' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||
@@ -656,6 +677,19 @@ def clc_server(resource, module_name):
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
def iter_host_ips(hosts, ips):
|
||||
'''Update hosts that have an entry in the floating IP list'''
|
||||
for host in hosts:
|
||||
host_id = host[1]['id']
|
||||
if host_id in ips:
|
||||
ip = ips[host_id]
|
||||
host[1].update({
|
||||
'access_ip_v4': ip,
|
||||
'public_ipv4': ip,
|
||||
'ansible_ssh_host': ip,
|
||||
})
|
||||
yield host
|
||||
|
||||
|
||||
## QUERY TYPES
|
||||
def query_host(hosts, target):
|
||||
@@ -727,6 +761,13 @@ def main():
|
||||
parser.exit()
|
||||
|
||||
hosts = iterhosts(iterresources(tfstates(args.root)))
|
||||
|
||||
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
|
||||
ips = dict(iterips(iterresources(tfstates(args.root))))
|
||||
|
||||
if ips:
|
||||
hosts = iter_host_ips(hosts, ips)
|
||||
|
||||
if args.list:
|
||||
output = query_list(hosts)
|
||||
if args.nometa:
|
||||
|
||||
docs/contiv.md (new file)
@@ -0,0 +1,74 @@
|
||||
Contiv
|
||||
======
|
||||
|
||||
Here is the [Contiv documentation](http://contiv.github.io/documents/).
|
||||
|
||||
## Administrate Contiv
|
||||
|
||||
There are two ways to manage Contiv:
|
||||
|
||||
* a web UI managed by the api proxy service
|
||||
* a CLI named `netctl`
|
||||
|
||||
|
||||
### Interfaces
|
||||
|
||||
#### The Web Interface
|
||||
|
||||
This UI is hosted on all Kubernetes master nodes. The service is available at `https://<one of your master nodes>:10000`.
|
||||
|
||||
You can configure the api proxy by overriding the following variables:
|
||||
|
||||
```yaml
|
||||
contiv_enable_api_proxy: true
|
||||
contiv_api_proxy_port: 10000
|
||||
contiv_generate_certificate: true
|
||||
```
|
||||
|
||||
The default credentials to log in are: admin/admin.
|
||||
|
||||
|
||||
#### The Command Line Interface
|
||||
|
||||
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
|
||||
|
||||
```bash
|
||||
export NETMASTER=http://127.0.0.1:9999
|
||||
```
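Once `NETMASTER` is exported, a quick way to confirm that `netctl` can reach the cluster is to list the configured networks (a sketch; assumes `netctl` is installed on that node):

```bash
# Should list the Contiv networks, e.g. contivh1 and default-net
netctl network ls
```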
|
||||
|
||||
The port can be changed by overriding the following variable:
|
||||
|
||||
```yaml
|
||||
contiv_netmaster_port: 9999
|
||||
```
|
||||
|
||||
The CLI doesn't use the authentication process needed by the web interface.
|
||||
|
||||
|
||||
### Network configuration
|
||||
|
||||
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
|
||||
|
||||
* `contivh1`: an infrastructure network. It allows nodes to access the pod IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
|
||||
* `default-net` : the default network that hosts pods.
|
||||
|
||||
You can change the default network configuration by overriding the `contiv_networks` variable.
|
||||
|
||||
The default forward mode is set to routing:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: routing
|
||||
```
|
||||
|
||||
The following is an example of how you can use VLAN instead of VXLAN:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: bridge
|
||||
contiv_vlan_interface: eth0
|
||||
contiv_networks:
|
||||
- name: default-net
|
||||
subnet: "{{ kube_pods_subnet }}"
|
||||
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
encap: vlan
|
||||
pkt_tag: 10
|
||||
```
|
||||
@@ -26,13 +26,13 @@ Debian Jessie installation Notes:
|
||||
```
|
||||
sudo add-apt-repository ppa:ansible/ansible
|
||||
sudo apt-get update
|
||||
sudo apt.get install ansible
|
||||
sudo apt-get install ansible
|
||||
|
||||
```
|
||||
|
||||
- Install Jinja2 and Python-Netaddr
|
||||
|
||||
```sudo apt-get install phyton-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||
|
||||
|
||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||
|
||||
@@ -50,7 +50,7 @@ DNS modes supported by Kubespray
|
||||
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||
|
||||
## dns_mode
|
||||
``dns_mode`` configures how Kubespray will setup cluster DNS. There are three modes available:
|
||||
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
|
||||
|
||||
#### dnsmasq_kubedns (default)
|
||||
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
||||
@@ -62,6 +62,12 @@ other queries are forwarded to the nameservers found in ``upstream_dns_servers``
|
||||
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
|
||||
all queries.
|
||||
|
||||
#### manual
|
||||
This does not install dnsmasq or kubedns, but allows you to specify
|
||||
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
|
||||
Use this method if you plan to install your own DNS server in the cluster after
|
||||
initial deployment.
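As a minimal sketch (the inventory path and DNS server IP are placeholders), both variables can also be passed as extra vars instead of editing group_vars:

```bash
# Skip the dnsmasq/kubedns add-ons and point kubelets at an existing resolver
ansible-playbook -b -i inventory/inventory.cfg cluster.yml \
  -e dns_mode=manual -e manual_dns_server=10.233.0.10
```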
|
||||
|
||||
#### none
|
||||
This does not install dnsmasq or kubedns/skydns. This basically disables cluster DNS completely and
|
||||
leaves you with a non-functional cluster.
|
||||
|
||||
@@ -75,7 +75,7 @@ kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
|
||||
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
|
||||
generated will point to localhost (on kube-masters) and kube-node hosts will
|
||||
connect either to a localhost nginx proxy or to a loadbalancer if configured.
|
||||
More details on this process are in the [HA guide](ha.md).
|
||||
More details on this process are in the [HA guide](ha-mode.md).
|
||||
|
||||
Kubespray permits connecting to the cluster remotely on any IP of any
|
||||
kube-master host on port 6443 by default. However, this requires
|
||||
@@ -93,18 +93,19 @@ the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-applicati
|
||||
Accessing Kubernetes Dashboard
|
||||
------------------------------
|
||||
|
||||
If the variable `dashboard_enabled` is set (default is true) as well as
|
||||
kube_basic_auth (default is false), then you can
|
||||
access the Kubernetes Dashboard at the following URL:
|
||||
As of kubernetes-dashboard v1.7.x:
|
||||
* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
|
||||
* Requires RBAC in authorization_modes
|
||||
* Only serves over https
|
||||
* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL
|
||||
|
||||
https://kube:_kube-password_@_host_:6443/ui/
|
||||
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL; you will be prompted for credentials:
|
||||
https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
|
||||
|
||||
To see the password, refer to the section above, titled *Connecting to
|
||||
Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
|
||||
(when enabled).
|
||||
Or you can run 'kubectl proxy' from your local machine to access the dashboard in your browser at:
|
||||
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
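For example, from a machine where kubectl is already configured for this cluster:

```bash
# Starts a local proxy on 127.0.0.1:8001; then open the dashboard URL above in a browser
kubectl proxy
```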
|
||||
|
||||
To access the Dashboard with basic auth disabled, follow the instructions here:
|
||||
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#command-line-proxy
|
||||
It is recommended to access the dashboard from behind a gateway (such as an Ingress controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
|
||||
|
||||
Accessing Kubernetes API
|
||||
------------------------
|
||||
|
||||
@@ -27,19 +27,21 @@ non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||
is less efficient than a dedicated load balancer because it creates extra
|
||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||
where an external LB or virtual IP management is inconvenient. This option is
|
||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to
|
||||
`True`, or `False` if there is an external `loadbalancer_apiserver` defined).
|
||||
You may also define the port the local internal loadbalancer uses by changing
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
||||
It is also important to note that Kubespray will only configure kubelet and kube-proxy
|
||||
on non-master nodes to use the local internal loadbalancer.
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of
|
||||
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||
configure kubelet and kube-proxy on non-master nodes to use the local internal
|
||||
loadbalancer.
|
||||
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
||||
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
||||
a user and is not covered by ansible roles in Kubespray. By default, it only configures
|
||||
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
||||
node in the `kube-master` group. It can also configure clients to use endpoints
|
||||
for a given loadbalancer type. The following diagram shows how traffic to the
|
||||
apiserver is directed.
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to
|
||||
configure your own loadbalancer to achieve HA. Note that deploying a
|
||||
loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
|
||||
By default, it only configures a non-HA endpoint, which points to the
|
||||
`access_ip` or IP address of the first server node in the `kube-master` group.
|
||||
It can also configure clients to use endpoints for a given loadbalancer type.
|
||||
The following diagram shows how traffic to the apiserver is directed.
|
||||
|
||||

|
||||
|
||||
@@ -66,40 +68,72 @@ listen kubernetes-apiserver-https
|
||||
balance roundrobin
|
||||
```
|
||||
|
||||
And the corresponding example global vars config:
|
||||
Note: That's an example config managed elsewhere outside of Kubespray.
|
||||
|
||||
And the corresponding example global vars for such a "cluster-aware"
|
||||
external LB with the cluster API access modes configured in Kubespray:
|
||||
```
|
||||
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
||||
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
||||
loadbalancer_apiserver:
|
||||
address: <VIP>
|
||||
port: 8383
|
||||
```
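With such an external LB in place, a quick sanity check from a client machine could look like this (a sketch reusing the example values above; `-k` skips certificate verification, and even an authorization error proves the endpoint answers):

```bash
# The apiserver behind the external LB should respond on the configured name and port
curl -k https://my-apiserver-lb.example.com:8383/healthz
```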
|
||||
|
||||
Note: The default kubernetes apiserver configuration binds to all interfaces,
|
||||
so you will need to use a different port for the VIP than the one the API is
|
||||
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
||||
listens on a specific interface (to avoid conflict with haproxy binding the
|
||||
port on the VIP address).
|
||||
|
||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
||||
into the generated self-signed TLS/SSL certificates as well. Note that
|
||||
the HAProxy service should be HA as well and requires VIP management, which
|
||||
is out of scope of this doc. Specifying an external LB overrides any internal
|
||||
localhost LB configuration.
|
||||
is out of scope of this doc.
|
||||
|
||||
Note: In order to achieve HA for HAProxy instances, those must be running on
|
||||
each node in the `k8s-cluster` group as well, but require no VIP, thus
|
||||
no VIP management.
|
||||
There is a special case for an internal and an externally configured (not with
|
||||
Kubespray) LB used simultaneously. Keep in mind that the cluster is not aware
|
||||
of such an external LB and you do not need to specify any configuration variables
|
||||
for it.
|
||||
|
||||
Access endpoints are evaluated automagically, as the following:
|
||||
Note: TLS/SSL termination for externally accessed API endpoints will **not**
|
||||
be covered by Kubespray for that case. Make sure your external LB provides it.
|
||||
Alternatively, you may specify externally load balanced VIPs in the
|
||||
`supplementary_addresses_in_ssl_keys` list. Then, kubespray will add them into
|
||||
the generated cluster certificates as well.
|
||||
|
||||
| Endpoint type | kube-master | non-master |
|
||||
|------------------------------|---------------|---------------------|
|
||||
| Local LB (default) | http://lc:p | https://lc:nsp |
|
||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB | http://lc:p | https://m[0].aip:sp |
|
||||
Aside from that specific case, `loadbalancer_apiserver` is considered mutually
|
||||
exclusive with `loadbalancer_apiserver_localhost`.
|
||||
|
||||
Access API endpoints are evaluated automagically, as follows:
|
||||
|
||||
| Endpoint type | kube-master | non-master | external |
|
||||
|------------------------------|----------------|---------------------|---------------------|
|
||||
| Local LB (default) | https://bip:sp | https://lc:nsp | https://m[0].aip:sp |
|
||||
| Local LB + Unmanaged here LB | https://bip:sp | https://lc:nsp | https://ext |
|
||||
| External LB, no internal | https://bip:sp | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB | https://bip:sp | https://m[0].aip:sp | https://m[0].aip:sp |
|
||||
|
||||
Where:
|
||||
* `m[0]` - the first node in the `kube-master` group;
|
||||
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
||||
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
|
||||
* `lc` - localhost;
|
||||
* `p` - insecure port, `kube_apiserver_insecure_port`
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
|
||||
* `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
|
||||
* `sp` - secure port, `kube_apiserver_port`;
|
||||
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
||||
* `ip` - the node IP, defers to the ansible IP;
|
||||
* `aip` - `access_ip`, defers to the ip.
|
||||
|
||||
The second and third columns represent internal cluster access modes. The last
|
||||
column illustrates an example URI to access the cluster APIs externally.
|
||||
Kubespray has nothing to do with it; this is informational only.
|
||||
|
||||
As you can see, the masters' internal API endpoints are always
|
||||
contacted via the local bind IP, which is `https://bip:sp`.
|
||||
|
||||
**Note** that for some cases, like healthchecks of applications deployed by
|
||||
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
|
||||
consists of the local `kube_apiserver_insecure_bind_address` and
|
||||
`kube_apiserver_insecure_port`.
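As a sketch, assuming the default insecure bind address (localhost) and port (8080) from the group vars in this changeset, such a healthcheck boils down to:

```bash
# Run on a kube-master node; the insecure endpoint answers without authentication
curl http://127.0.0.1:8080/healthz
```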
|
||||
|
||||
@@ -84,7 +84,7 @@ Other members of your team should use ```git submodule sync```, ```git submodule
|
||||
# Contributing
|
||||
If you made useful changes or fixed a bug in the existing kubespray repo, use this flow for PRs to the original kubespray repo.
|
||||
|
||||
0. Sign the [CNCF CLA](https://github.com/kubernetes/kubernetes/wiki/CLA-FAQ).
|
||||
0. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).
|
||||
|
||||
1. Change working directory to git submodule directory (3d/kubespray).
|
||||
|
||||
|
||||
@@ -34,6 +34,9 @@ For large scale deployments, consider the following configuration changes:
|
||||
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
||||
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||
|
||||
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||
``kube_service_addresses`` and ``kube_pods_subnet``.
|
||||
|
||||
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
||||
from host/network interruption much quicker with calico-rr. Note that
|
||||
calico-rr role must be on a host without kube-master or kube-node role (but
|
||||
|
||||
docs/local-storage-provisioner.md (new file)
@@ -0,0 +1,67 @@
|
||||
# Local Storage Provisioner
|
||||
|
||||
The local storage provisioner is NOT a dynamic storage provisioner as you would
|
||||
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
||||
all manually created volumes located in the directory `local_volume_base_dir`.
|
||||
The default path is /mnt/disks and the rest of this doc will use that path as
|
||||
an example.
|
||||
|
||||
## Examples to create local storage volumes
|
||||
|
||||
### tmpfs method:
|
||||
|
||||
```
|
||||
for vol in vol1 vol2 vol3; do
|
||||
mkdir /mnt/disks/$vol
|
||||
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
||||
done
|
||||
```
|
||||
|
||||
The tmpfs method is not recommended for production because the mount is not
|
||||
persistent and data will be deleted on reboot.
|
||||
|
||||
### Mount physical disks
|
||||
|
||||
```
|
||||
mkdir /mnt/disks/ssd1
|
||||
mount /dev/vdb1 /mnt/disks/ssd1
|
||||
```
|
||||
|
||||
Physical disks are recommended for production environments because they offer
|
||||
complete isolation in terms of I/O and capacity.
|
||||
|
||||
### File-backed sparsefile method
|
||||
|
||||
```
|
||||
truncate /mnt/disks/disk5 --size 2G
|
||||
mkfs.ext4 /mnt/disks/disk5
|
||||
mkdir /mnt/disks/vol5
|
||||
mount /mnt/disks/disk5 /mnt/disks/vol5
|
||||
```
|
||||
|
||||
If you have a development environment and only one disk, this is the best way
|
||||
to limit the quota of persistent volumes.
|
||||
|
||||
### Simple directories
|
||||
```
|
||||
for vol in vol6 vol7 vol8; do
|
||||
mkdir /mnt/disks/$vol
|
||||
done
|
||||
```
|
||||
|
||||
This is also acceptable in a development environment, but there is no capacity
|
||||
management.
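Whichever method you use, the provisioner should expose the mounted directories as PersistentVolumes, which you can verify as usual (sketch):

```bash
# Each volume under /mnt/disks should appear as a PersistentVolume
kubectl get pv
```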
|
||||
|
||||
## Usage notes
|
||||
|
||||
The volume provisioner cannot calculate volume sizes correctly, so you should
|
||||
delete the daemonset pod on the relevant host after creating volumes. The pod
|
||||
will be recreated and read the size correctly.
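A sketch of that step (the pod name, label and namespace are assumptions and may differ in your deployment):

```bash
# Find the provisioner pod running on the host where the volume was added, then delete it
kubectl -n kube-system get pods -o wide | grep local-volume-provisioner
kubectl -n kube-system delete pod <local-volume-provisioner-pod-on-that-host>
```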
|
||||
|
||||
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
|
||||
CoreOS/Container Linux). Pods with persistent volume claims will not be
|
||||
able to start if the mounts become unavailable.
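For the physical-disk example above, a persistent mount could look like this (a sketch; adjust the device and filesystem type to your environment):

```bash
# Re-mount the data disk on every boot
echo '/dev/vdb1  /mnt/disks/ssd1  ext4  defaults  0 2' >> /etc/fstab
```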
|
||||
|
||||
## Further reading
|
||||
|
||||
Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume
|
||||
@@ -24,7 +24,7 @@ hardcoded to only create a Vault role for Etcd.
|
||||
This step is where the long-term Vault cluster is started and configured. Its
|
||||
first task is to stop any temporary instances of Vault, to free the port for
|
||||
the long-term. At the end of this task, the entire Vault cluster should be up
|
||||
and read to go.
|
||||
and ready to go.
|
||||
|
||||
Keys to the Kingdom
|
||||
-------------------
|
||||
|
||||
@@ -34,10 +34,12 @@ Then, in the same file, you need to declare your vCenter credential following th
|
||||
| vsphere_datastore | TRUE | string | | | Datastore name to use |
|
||||
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed |
|
||||
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
|
||||
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` |
|
||||
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) |
|
||||
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
|
||||
| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) |
|
||||
|
||||
Example configuration
|
||||
|
||||
```yml
|
||||
vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||
vsphere_vcenter_port: 443
|
||||
@@ -48,6 +50,7 @@ vsphere_datacenter: "DATACENTER_name"
|
||||
vsphere_datastore: "DATASTORE_name"
|
||||
vsphere_working_dir: "Docker_hosts"
|
||||
vsphere_scsi_controller_type: "pvscsi"
|
||||
vsphere_resource_pool: "K8s-Pool"
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
@@ -91,7 +91,7 @@ weave_peers: uninitialized
|
||||
|
||||
The first variable, `weave_seed`, contains the initial nodes of the weave network
|
||||
|
||||
The seconde variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||
The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||
|
||||
These two variables are used to connect a new node to the weave network. The new node needs to know the first nodes (the seed) and the list of IPs of all nodes.
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ bin_dir: /usr/local/bin
|
||||
|
||||
## There are some changes specific to the cloud providers
|
||||
## for instance we need to encapsulate packets with some network plugins
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
|
||||
## When openstack is used make sure to source in the openstack credentials
|
||||
## like you would do when using nova-client before starting the playbook.
|
||||
#cloud_provider:
|
||||
@@ -74,12 +74,17 @@ bin_dir: /usr/local/bin
|
||||
#azure_vnet_name:
|
||||
#azure_route_table_name:
|
||||
|
||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
#openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||
#openstack_lbaas_enabled: True
|
||||
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||
## To enable automatic floating ip provisioning, specify a subnet.
|
||||
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||
## Override default LBaaS behavior
|
||||
#openstack_lbaas_use_octavia: False
|
||||
#openstack_lbaas_method: "ROUND_ROBIN"
|
||||
#openstack_lbaas_provider: "haproxy"
|
||||
#openstack_lbaas_create_monitor: "yes"
|
||||
#openstack_lbaas_monitor_delay: "1m"
|
||||
#openstack_lbaas_monitor_timeout: "30s"
|
||||
|
||||
@@ -8,9 +8,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
||||
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||
system_namespace: kube-system
|
||||
|
||||
# Logging directory (sysvinit systems)
|
||||
kube_log_dir: "/var/log/kubernetes"
|
||||
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

@@ -20,10 +17,10 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"

kube_api_anonymous_auth: false
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.8.1
kube_version: v1.9.2

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)

@@ -65,7 +62,7 @@ kube_users:
# kube_oidc_groups_claim: groups

# Choose network plugin (calico, weave or flannel)
# Choose network plugin (calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

@@ -106,14 +103,19 @@ kube_network_node_prefix: 24
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0 # (disabled)

# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service

@@ -138,11 +140,13 @@ docker_bin_dir: "/usr/bin"
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
helm_deployment_type: host

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# Kubernetes dashboard (available at http://first_master:6443/ui by default)
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

# Monitoring apps for k8s

@@ -151,9 +155,15 @@ efk_enabled: false
# Helm deployment
helm_enabled: false

# Istio depoyment
# Istio deployment
istio_enabled: false

# Local volume provisioner deployment
local_volumes_enabled: false

# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
persistent_volumes_enabled: false

# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts

@@ -168,9 +178,14 @@ istio_enabled: false
# kubelet_cgroups_per_qos: true

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be usefull for example to setup a keepalived virtual IP
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
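The variables touched in the defaults diff above are normally overridden per cluster rather than edited in the role defaults. A minimal sketch of such an override, assuming a conventional inventory layout (the file path and the chosen values are illustrative; only the variable names are taken from the diff above):

# inventory/mycluster/group_vars/k8s-cluster.yml -- illustrative path, not part of the diff
kube_version: v1.9.2
kube_network_plugin: calico
dns_mode: kubedns
dashboard_enabled: true
# Per the comment above, disabling the insecure port needs RBAC in authorization_modes and anonymous auth
kube_api_anonymous_auth: true
authorization_modes: ['RBAC']
kube_apiserver_insecure_port: 0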
@@ -1,6 +1,9 @@
---

- hosts: all
gather_facts: true

- hosts: etcd:k8s-cluster:vault:calico-rr
vars_prompt:
name: "reset_confirmation"
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."

@@ -16,7 +16,5 @@ Host {{ bastion_ip }}
ControlPersist 5m

Host {{ vars['hosts'] }}
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}

StrictHostKeyChecking no
ProxyCommand ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
{% endif %}
@@ -1,5 +1,4 @@
---
pypy_version: 2.4.0
pip_python_modules:
pip_python_coreos_modules:
- httplib2
- six
- six

@@ -1,4 +1,4 @@
#/bin/bash
#!/bin/bash
set -e

BINDIR="/opt/bin"

@@ -3,6 +3,7 @@
raw: stat /opt/bin/.bootstrapped
register: need_bootstrap
failed_when: false
changed_when: false
tags:
- facts

@@ -51,4 +52,4 @@
- name: Install required python modules
pip:
name: "{{ item }}"
with_items: "{{pip_python_modules}}"
with_items: "{{pip_python_coreos_modules}}"
@@ -5,6 +5,7 @@
raw: which "{{ item }}"
register: need_bootstrap
failed_when: false
changed_when: false
with_items:
- python
- pip

@@ -16,7 +17,7 @@
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
when:
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"
need_bootstrap.results | map(attribute='rc') | sort | last | bool

- set_fact:
ansible_python_interpreter: "/usr/bin/python"

@@ -5,6 +5,7 @@
raw: which "{{ item }}"
register: need_bootstrap
failed_when: false
changed_when: false
with_items:
- python
- pip

@@ -1,17 +1,17 @@
---
- include: bootstrap-ubuntu.yml
- import_tasks: bootstrap-ubuntu.yml
when: bootstrap_os == "ubuntu"

- include: bootstrap-debian.yml
- import_tasks: bootstrap-debian.yml
when: bootstrap_os == "debian"

- include: bootstrap-coreos.yml
- import_tasks: bootstrap-coreos.yml
when: bootstrap_os == "coreos"

- include: bootstrap-centos.yml
- import_tasks: bootstrap-centos.yml
when: bootstrap_os == "centos"

- include: setup-pipelining.yml
- import_tasks: setup-pipelining.yml

- name: check if atomic host
stat:
@@ -39,7 +39,7 @@ spec:
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
|
||||
image: "{{ dnsmasqautoscaler_image_repo }}:{{ dnsmasqautoscaler_image_tag }}"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "20m"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
docker_version: '1.13'
|
||||
docker_version: '17.03'
|
||||
|
||||
docker_package_info:
|
||||
pkgs:
|
||||
@@ -16,3 +16,5 @@ docker_container_storage_setup: false
|
||||
|
||||
docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
|
||||
docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
|
||||
docker_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
|
||||
docker_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
|
||||
|
||||
@@ -3,6 +3,9 @@ docker_container_storage_setup_version: v0.6.0
|
||||
docker_container_storage_setup_profile_name: kubespray
|
||||
docker_container_storage_setup_storage_driver: devicemapper
|
||||
docker_container_storage_setup_container_thinpool: docker-pool
|
||||
#It must be define a disk path for docker_container_storage_setup_devs.
|
||||
#Otherwise docker-storage-setup will be executed incorrectly.
|
||||
#docker_container_storage_setup_devs: /dev/vdb
|
||||
docker_container_storage_setup_data_size: 40%FREE
|
||||
docker_container_storage_setup_min_data_size: 2G
|
||||
docker_container_storage_setup_chunk_size: 512K
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
copy:
|
||||
dest: /etc/systemd/system/docker.service.d/override.conf
|
||||
content: |-
|
||||
### Thie file is managed by Ansible
|
||||
### This file is managed by Ansible
|
||||
[Service]
|
||||
EnvironmentFile=-/etc/sysconfig/docker-storage
|
||||
|
||||
@@ -31,6 +31,12 @@
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
#https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
|
||||
- name: docker-storage-setup | install lvm2
|
||||
yum:
|
||||
name: lvm2
|
||||
state: present
|
||||
|
||||
- name: docker-storage-setup | install and run container-storage-setup
|
||||
become: yes
|
||||
script: install_container_storage_setup.sh {{ docker_container_storage_setup_version }} {{ docker_container_storage_setup_profile_name }}
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- include: set_facts_dns.yml
|
||||
- include_tasks: set_facts_dns.yml
|
||||
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
|
||||
tags:
|
||||
- facts
|
||||
@@ -34,13 +34,12 @@
|
||||
action: "{{ docker_repo_key_info.pkg_key }}"
|
||||
args:
|
||||
id: "{{item}}"
|
||||
keyserver: "{{docker_repo_key_info.keyserver}}"
|
||||
url: "{{docker_repo_key_info.url}}"
|
||||
state: present
|
||||
register: keyserver_task_result
|
||||
until: keyserver_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
|
||||
|
||||
@@ -68,20 +67,28 @@
|
||||
until: docker_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
with_items: "{{ docker_package_info.pkgs }}"
|
||||
notify: restart docker
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
|
||||
|
||||
- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
|
||||
- name: flush handlers so we can wait for docker to come up
|
||||
meta: flush_handlers
|
||||
|
||||
- name: set fact for docker_version
|
||||
command: "docker version -f '{{ '{{' }}.Client.Version{{ '}}' }}'"
|
||||
register: docker_version
|
||||
failed_when: docker_version.stdout|version_compare('1.12', '<')
|
||||
register: installed_docker_version
|
||||
changed_when: false
|
||||
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
|
||||
|
||||
- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
|
||||
fail:
|
||||
msg: "You need at least docker version >= 1.12 for resolvconf_mode=docker_dns"
|
||||
when: >
|
||||
dns_mode != 'none' and
|
||||
resolvconf_mode == 'docker_dns' and
|
||||
installed_docker_version.stdout|version_compare('1.12', '<')
|
||||
|
||||
- name: Set docker systemd config
|
||||
include: systemd.yml
|
||||
import_tasks: systemd.yml
|
||||
|
||||
- name: ensure docker service is started and enabled
|
||||
service:
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
{{ [ skydns_server ] }}
|
||||
{%- elif dns_mode == 'dnsmasq_kubedns' -%}
|
||||
{{ [ dnsmasq_dns_server ] }}
|
||||
{%- elif dns_mode == 'manual' -%}
|
||||
{{ [ manual_dns_server ] }}
|
||||
{%- endif -%}
|
||||
|
||||
- name: set base docker dns facts
|
||||
@@ -47,7 +49,7 @@
|
||||
|
||||
- name: add system search domains to docker options
|
||||
set_fact:
|
||||
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}"
|
||||
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}"
|
||||
when: system_search_domains.stdout != ""
|
||||
|
||||
- name: check number of nameservers
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
template:
|
||||
src: http-proxy.conf.j2
|
||||
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
|
||||
notify: restart docker
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
|
||||
- name: get systemd version
|
||||
|
||||
@@ -18,7 +18,7 @@ Environment=GOTRACEBACK=crash
|
||||
ExecReload=/bin/kill -s HUP $MAINPID
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
ExecStart={{ docker_bin_dir }}/docker daemon \
|
||||
ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
|
||||
$DOCKER_OPTS \
|
||||
$DOCKER_STORAGE_OPTIONS \
|
||||
$DOCKER_NETWORK_OPTIONS \
|
||||
|
||||
@@ -7,8 +7,9 @@ docker_versioned_pkg:
|
||||
'1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
|
||||
'1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-engine=17.03.0~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-engine=17.03.0~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'17.03': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-engine=17.05.0~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
@@ -18,7 +19,7 @@ docker_package_info:
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
url: '{{ docker_apt_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
|
||||
@@ -26,6 +27,6 @@ docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb https://apt.dockerproject.org/repo
|
||||
deb {{ docker_apt_repo_base_url }}
|
||||
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||
main
|
||||
|
||||
@@ -8,8 +8,9 @@ docker_kernel_min_version: '0'
|
||||
docker_versioned_pkg:
|
||||
'latest': docker
|
||||
'1.11': docker-1:1.11.2
|
||||
'1.12': docker-1:1.12.5
|
||||
'1.12': docker-1:1.12.6
|
||||
'1.13': docker-1.13.1
|
||||
'17.03': docker-17.03.1
|
||||
'stable': docker-ce
|
||||
'edge': docker-ce-edge
|
||||
|
||||
|
||||
@@ -8,8 +8,9 @@ docker_versioned_pkg:
|
||||
'1.11': docker-engine-1.11.2-1.el7.centos
|
||||
'1.12': docker-engine-1.12.6-1.el7.centos
|
||||
'1.13': docker-engine-1.13.1-1.el7.centos
|
||||
'stable': docker-engine-17.03.0.ce-1.el7.centos
|
||||
'edge': docker-engine-17.03.0.ce-1.el7.centos
|
||||
'17.03': docker-engine-17.03.1.ce-1.el7.centos
|
||||
'stable': docker-engine-17.03.1.ce-1.el7.centos
|
||||
'edge': docker-engine-17.05.0.ce-1.el7.centos
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
|
||||
# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
|
||||
|
||||
@@ -7,8 +7,9 @@ docker_versioned_pkg:
|
||||
'1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
|
||||
'1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-engine=17.03.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-engine=17.03.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'17.03': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-engine=17.05.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
@@ -18,7 +19,7 @@ docker_package_info:
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
url: '{{ docker_apt_repo_gpgkey }}'
|
||||
repo_keys:
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
|
||||
@@ -26,6 +27,6 @@ docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb https://apt.dockerproject.org/repo
|
||||
deb {{ docker_apt_repo_base_url }}
|
||||
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||
main
|
||||
|
||||
@@ -24,38 +24,33 @@ download_always_pull: False
|
||||
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
|
||||
|
||||
# Versions
|
||||
kube_version: v1.8.1
|
||||
kube_version: v1.9.2
|
||||
kubeadm_version: "{{ kube_version }}"
|
||||
etcd_version: v3.2.4
|
||||
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
|
||||
# after migration to container download
|
||||
calico_version: "v2.5.0"
|
||||
calico_ctl_version: "v1.5.0"
|
||||
calico_cni_version: "v1.10.0"
|
||||
calico_policy_version: "v0.7.0"
|
||||
calico_version: "v2.6.2"
|
||||
calico_ctl_version: "v1.6.1"
|
||||
calico_cni_version: "v1.11.0"
|
||||
calico_policy_version: "v1.0.0"
|
||||
calico_rr_version: "v0.4.0"
|
||||
weave_version: 2.0.4
|
||||
flannel_version: "v0.8.0"
|
||||
flannel_cni_version: "v0.2.0"
|
||||
flannel_version: "v0.9.1"
|
||||
flannel_cni_version: "v0.3.0"
|
||||
istio_version: "0.2.6"
|
||||
vault_version: 0.8.1
|
||||
weave_version: 2.1.3
|
||||
pod_infra_version: 3.0
|
||||
contiv_version: 1.1.7
|
||||
|
||||
# Download URLs
|
||||
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
|
||||
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"
|
||||
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
|
||||
|
||||
# Checksums
|
||||
kubeadm_checksum: "93246027cc225b4fd7ec57bf1f562dbc78f2ed9f2b77a1468976c266a104cf4d"
|
||||
|
||||
istio_version: "0.2.6"
|
||||
|
||||
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
|
||||
istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
|
||||
|
||||
vault_version: 0.8.1
|
||||
kubeadm_checksum: 560b44a2b91747f4fb64ac8754fcf65db9a39a84c6b54d4e6483400ac6c674fc
|
||||
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
|
||||
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
|
||||
vault_image_repo: "vault"
|
||||
vault_image_tag: "{{ vault_version }}"
|
||||
|
||||
|
||||
# Containers
|
||||
etcd_image_repo: "quay.io/coreos/etcd"
|
||||
@@ -70,7 +65,7 @@ calico_node_image_repo: "quay.io/calico/node"
|
||||
calico_node_image_tag: "{{ calico_version }}"
|
||||
calico_cni_image_repo: "quay.io/calico/cni"
|
||||
calico_cni_image_tag: "{{ calico_cni_version }}"
|
||||
calico_policy_image_repo: "quay.io/calico/kube-policy-controller"
|
||||
calico_policy_image_repo: "quay.io/calico/kube-controllers"
|
||||
calico_policy_image_tag: "{{ calico_policy_version }}"
|
||||
calico_rr_image_repo: "quay.io/calico/routereflector"
|
||||
calico_rr_image_tag: "{{ calico_rr_version }}"
|
||||
@@ -89,20 +84,27 @@ weave_kube_image_repo: "weaveworks/weave-kube"
|
||||
weave_kube_image_tag: "{{ weave_version }}"
|
||||
weave_npc_image_repo: "weaveworks/weave-npc"
|
||||
weave_npc_image_tag: "{{ weave_version }}"
|
||||
contiv_image_repo: "contiv/netplugin"
|
||||
contiv_image_tag: "{{ contiv_version }}"
|
||||
contiv_auth_proxy_image_repo: "contiv/auth_proxy"
|
||||
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
|
||||
|
||||
nginx_image_repo: nginx
|
||||
nginx_image_tag: 1.11.4-alpine
|
||||
nginx_image_tag: 1.13
|
||||
dnsmasq_version: 2.78
|
||||
dnsmasq_image_repo: "andyshinn/dnsmasq"
|
||||
dnsmasq_image_tag: "{{ dnsmasq_version }}"
|
||||
kubedns_version: 1.14.5
|
||||
kubedns_version: 1.14.8
|
||||
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
|
||||
kubedns_image_tag: "{{ kubedns_version }}"
|
||||
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
|
||||
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
|
||||
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
|
||||
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
|
||||
kubednsautoscaler_version: 1.1.1
|
||||
dnsmasqautoscaler_version: 1.1.2
|
||||
dnsmasqautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
|
||||
dnsmasqautoscaler_image_tag: "{{ dnsmasqautoscaler_version }}"
|
||||
kubednsautoscaler_version: 1.1.2
|
||||
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
|
||||
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
|
||||
test_image_repo: busybox
|
||||
@@ -117,12 +119,13 @@ kibana_version: "v4.6.1"
|
||||
kibana_image_repo: "gcr.io/google_containers/kibana"
|
||||
kibana_image_tag: "{{ kibana_version }}"
|
||||
|
||||
helm_version: "v2.2.2"
|
||||
helm_version: "v2.7.2"
|
||||
helm_image_repo: "lachlanevenson/k8s-helm"
|
||||
helm_image_tag: "{{ helm_version }}"
|
||||
tiller_version: "{{ helm_version }}"
|
||||
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
|
||||
tiller_image_tag: "{{ tiller_version }}"
|
||||
tiller_image_tag: "{{ helm_version }}"
|
||||
vault_image_repo: "vault"
|
||||
vault_image_tag: "{{ vault_version }}"
|
||||
|
||||
downloads:
|
||||
netcheck_server:
|
||||
@@ -225,6 +228,18 @@ downloads:
|
||||
repo: "{{ weave_npc_image_repo }}"
|
||||
tag: "{{ weave_npc_image_tag }}"
|
||||
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
|
||||
contiv:
|
||||
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
||||
container: true
|
||||
repo: "{{ contiv_image_repo }}"
|
||||
tag: "{{ contiv_image_tag }}"
|
||||
sha256: "{{ contiv_digest_checksum|default(None) }}"
|
||||
contiv_auth_proxy:
|
||||
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
||||
container: true
|
||||
repo: "{{ contiv_auth_proxy_image_repo }}"
|
||||
tag: "{{ contiv_auth_proxy_image_tag }}"
|
||||
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
|
||||
pod_infra:
|
||||
enabled: true
|
||||
container: true
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: container_download | Make download decision if pull is required by tag or sha256
|
||||
include: set_docker_image_facts.yml
|
||||
include_tasks: set_docker_image_facts.yml
|
||||
delegate_to: "{{ download_delegate if download_run_once or omit }}"
|
||||
delegate_facts: no
|
||||
run_once: "{{ download_run_once }}"
|
||||
@@ -10,17 +10,31 @@
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: container_download | Download containers if pull is required or told to always pull
|
||||
# FIXME(mattymo): In Ansible 2.4 omitting download delegate is broken. Move back
|
||||
# to one task in the future.
|
||||
- name: container_download | Download containers if pull is required or told to always pull (delegate)
|
||||
command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
|
||||
register: pull_task_result
|
||||
until: pull_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
when:
|
||||
- download_run_once
|
||||
- download.enabled
|
||||
- download.container
|
||||
- pull_required|default(download_always_pull)
|
||||
delegate_to: "{{ download_delegate }}"
|
||||
delegate_facts: yes
|
||||
run_once: yes
|
||||
|
||||
- name: container_download | Download containers if pull is required or told to always pull (all nodes)
|
||||
command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
|
||||
register: pull_task_result
|
||||
until: pull_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when:
|
||||
- not download_run_once
|
||||
- download.enabled
|
||||
- download.container
|
||||
- pull_required|default(download_always_pull)
|
||||
delegate_to: "{{ download_delegate if download_run_once or omit }}"
|
||||
delegate_facts: no
|
||||
run_once: "{{ download_run_once }}"
|
||||
|
||||
@@ -25,7 +25,6 @@
|
||||
until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
environment: "{{ proxy_env }}"
|
||||
when:
|
||||
- download.enabled
|
||||
- download.file
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
---
|
||||
- include: download_prep.yml
|
||||
- include_tasks: download_prep.yml
|
||||
when:
|
||||
- not skip_downloads|default(false)
|
||||
|
||||
- name: "Download items"
|
||||
include: "download_{% if download.container %}container{% else %}file{% endif %}.yml"
|
||||
include_tasks: "download_{% if download.container %}container{% else %}file{% endif %}.yml"
|
||||
vars:
|
||||
download: "{{ download_defaults | combine(item.value) }}"
|
||||
with_dict: "{{ downloads }}"
|
||||
@@ -13,7 +13,7 @@
|
||||
- item.value.enabled
|
||||
|
||||
- name: "Sync container"
|
||||
include: sync_container.yml
|
||||
include_tasks: sync_container.yml
|
||||
vars:
|
||||
download: "{{ download_defaults | combine(item.value) }}"
|
||||
with_dict: "{{ downloads }}"
|
||||
|
||||
@@ -1,4 +1,15 @@
|
||||
---
|
||||
- name: container_download | Make download decision if pull is required by tag or sha256
|
||||
include: set_docker_image_facts.yml
|
||||
delegate_to: "{{ download_delegate if download_run_once or omit }}"
|
||||
delegate_facts: no
|
||||
run_once: "{{ download_run_once }}"
|
||||
when:
|
||||
- download.enabled
|
||||
- download.container
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- set_fact:
|
||||
fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
|
||||
run_once: true
|
||||
|
||||
@@ -8,6 +8,13 @@ etcd_data_dir: "/var/lib/etcd"
|
||||
etcd_config_dir: /etc/ssl/etcd
|
||||
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
|
||||
etcd_cert_group: root
|
||||
# Note: This does not set up DNS entries. It simply adds the following DNS
|
||||
# entries to the certificate
|
||||
etcd_cert_alt_names:
|
||||
- "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
|
||||
- "etcd.{{ system_namespace }}.svc"
|
||||
- "etcd.{{ system_namespace }}"
|
||||
- "etcd"
|
||||
|
||||
etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
|
||||
|
||||
@@ -30,3 +37,6 @@ etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr',
|
||||
etcd_compaction_retention: "8"
|
||||
|
||||
etcd_vault_mount_path: etcd
|
||||
|
||||
# Force clients like etcdctl to use TLS certs (different than peer security)
|
||||
etcd_secure_client: true
|
||||
|
||||
@@ -48,5 +48,7 @@
|
||||
snapshot save {{ etcd_backup_directory }}/snapshot.db
|
||||
environment:
|
||||
ETCDCTL_API: 3
|
||||
ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
retries: 3
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
- reload etcd
|
||||
- wait for etcd up
|
||||
|
||||
- include: backup.yml
|
||||
- import_tasks: backup.yml
|
||||
|
||||
- name: etcd | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
@@ -22,6 +22,8 @@
|
||||
uri:
|
||||
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
|
||||
validate_certs: no
|
||||
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
|
||||
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
|
||||
register: result
|
||||
until: result.status is defined and result.status == 200
|
||||
retries: 10
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: Configure | Check if member is in cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
@@ -8,6 +8,9 @@
|
||||
when: is_etcd_master
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Install etcd launch script
|
||||
template:
|
||||
@@ -27,7 +30,7 @@
|
||||
notify: restart etcd
|
||||
|
||||
- name: Configure | Join member(s) to cluster one at a time
|
||||
include: join_member.yml
|
||||
include_tasks: join_member.yml
|
||||
vars:
|
||||
target_node: "{{ item }}"
|
||||
loop_control:
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
---
|
||||
- include: sync_etcd_master_certs.yml
|
||||
- include_tasks: sync_etcd_master_certs.yml
|
||||
when: inventory_hostname in groups.etcd
|
||||
tags:
|
||||
- etcd-secrets
|
||||
|
||||
- include: sync_etcd_node_certs.yml
|
||||
- include_tasks: sync_etcd_node_certs.yml
|
||||
when: inventory_hostname in etcd_node_cert_hosts
|
||||
tags:
|
||||
- etcd-secrets
|
||||
|
||||
# Issue master certs to Etcd nodes
|
||||
- include: ../../vault/tasks/shared/issue_cert.yml
|
||||
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
|
||||
issue_cert_alt_names: "{{ groups.etcd + ['localhost'] }}"
|
||||
issue_cert_alt_names: "{{ groups['etcd'] + ['localhost'] + (etcd_cert_alt_names)|default() }}"
|
||||
issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
|
||||
issue_cert_file_group: "{{ etcd_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
@@ -37,7 +37,7 @@
|
||||
notify: set etcd_secret_changed
|
||||
|
||||
# Issue node certs to everyone else
|
||||
- include: ../../vault/tasks/shared/issue_cert.yml
|
||||
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
|
||||
issue_cert_alt_names: "{{ etcd_node_cert_hosts }}"
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- name: Install | Copy etcdctl binary from rkt container
|
||||
command: >-
|
||||
@@ -24,3 +25,4 @@
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
---
|
||||
- name: Join Member | Add member to cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
|
||||
register: member_add_result
|
||||
until: member_add_result.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when: target_node == inventory_hostname
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- include: refresh_config.yml
|
||||
- include_tasks: refresh_config.yml
|
||||
vars:
|
||||
etcd_peer_addresses: >-
|
||||
{% for host in groups['etcd'] -%}
|
||||
@@ -32,10 +35,13 @@
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure member is in cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
tags:
|
||||
- facts
|
||||
when: target_node == inventory_hostname
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
---
|
||||
- include: check_certs.yml
|
||||
- include_tasks: check_certs.yml
|
||||
when: cert_management == "script"
|
||||
tags:
|
||||
- etcd-secrets
|
||||
- facts
|
||||
|
||||
- include: "gen_certs_{{ cert_management }}.yml"
|
||||
- include_tasks: "gen_certs_{{ cert_management }}.yml"
|
||||
tags:
|
||||
- etcd-secrets
|
||||
|
||||
- include: upd_ca_trust.yml
|
||||
- include_tasks: upd_ca_trust.yml
|
||||
tags:
|
||||
- etcd-secrets
|
||||
|
||||
@@ -24,18 +24,18 @@
|
||||
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
|
||||
|
||||
- include: "install_{{ etcd_deployment_type }}.yml"
|
||||
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
|
||||
when: is_etcd_master
|
||||
tags:
|
||||
- upgrade
|
||||
|
||||
- include: set_cluster_health.yml
|
||||
- import_tasks: set_cluster_health.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- include: configure.yml
|
||||
- import_tasks: configure.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- include: refresh_config.yml
|
||||
- import_tasks: refresh_config.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- name: Restart etcd if certs changed
|
||||
@@ -56,8 +56,8 @@
|
||||
# After etcd cluster is assembled, make sure that
|
||||
# initial state of the cluster is in `existing`
|
||||
# state insted of `new`.
|
||||
- include: set_cluster_health.yml
|
||||
- import_tasks: set_cluster_health.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- include: refresh_config.yml
|
||||
- import_tasks: refresh_config.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: Configure | Check if cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_cluster_is_healthy
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
@@ -8,3 +8,6 @@
|
||||
when: is_etcd_master
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"member-" + inventory_hostname + ".pem"
|
||||
] }}
|
||||
|
||||
- include: ../../vault/tasks/shared/sync_file.yml
|
||||
- include_tasks: ../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
@@ -26,7 +26,7 @@
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include: ../../vault/tasks/shared/sync_file.yml
|
||||
- include_tasks: ../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: ca.pem
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
set_fact:
|
||||
etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"
|
||||
|
||||
- include: ../../vault/tasks/shared/sync_file.yml
|
||||
- include_tasks: ../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
@@ -22,7 +22,7 @@
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include: ../../vault/tasks/shared/sync_file.yml
|
||||
- include_tasks: ../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: ca.pem
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
|
||||
@@ -18,6 +18,8 @@ ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
|
||||
ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
|
||||
ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
|
||||
ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
|
||||
ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}}
|
||||
|
||||
ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
|
||||
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
|
||||
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
|
||||
|
||||
@@ -27,10 +27,14 @@ DNS.1 = localhost
|
||||
{% for host in groups['etcd'] %}
|
||||
DNS.{{ 1 + loop.index }} = {{ host }}
|
||||
{% endfor %}
|
||||
{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
|
||||
{% if loadbalancer_apiserver is defined %}
|
||||
{% set idx = groups['etcd'] | length | int + 2 %}
|
||||
DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
|
||||
{% endif %}
|
||||
{% set idx = groups['etcd'] | length | int + 3 %}
|
||||
{% for etcd_alt_name in etcd_cert_alt_names %}
|
||||
DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
|
||||
{% endfor %}
|
||||
{% for host in groups['etcd'] %}
|
||||
IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
|
||||
IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
# Versions
|
||||
kubedns_version: 1.14.5
|
||||
kubednsautoscaler_version: 1.1.1
|
||||
kubedns_version: 1.14.8
|
||||
kubednsautoscaler_version: 1.1.2
|
||||
|
||||
# Limits for dnsmasq/kubedns apps
|
||||
dns_memory_limit: 170Mi
|
||||
@@ -39,9 +39,11 @@ netchecker_server_cpu_requests: 50m
|
||||
netchecker_server_memory_requests: 64M
|
||||
|
||||
# Dashboard
|
||||
dashboard_enabled: false
|
||||
dashboard_enabled: true
|
||||
dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-amd64
|
||||
dashboard_image_tag: v1.6.3
|
||||
dashboard_image_tag: v1.8.1
|
||||
dashboard_init_image_repo: gcr.io/google_containers/kubernetes-dashboard-init-amd64
|
||||
dashboard_init_image_tag: v1.0.1
|
||||
|
||||
# Limits for dashboard
|
||||
dashboard_cpu_limit: 100m
|
||||
@@ -49,6 +51,13 @@ dashboard_memory_limit: 256M
|
||||
dashboard_cpu_requests: 50m
|
||||
dashboard_memory_requests: 64M
|
||||
|
||||
# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that
|
||||
# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer provisioned certs
|
||||
dashboard_use_custom_certs: false
|
||||
dashboard_certs_secret_name: kubernetes-dashboard-certs
|
||||
dashboard_tls_key_file: dashboard.key
|
||||
dashboard_tls_cert_file: dashboard.crt
|
||||
|
||||
# SSL
|
||||
etcd_cert_dir: "/etc/ssl/etcd/ssl"
|
||||
canal_cert_dir: "/etc/canal/certs"
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Delete old kubernetes-dashboard resources
|
||||
kube:
|
||||
name: "kubernetes-dashboard"
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- 'ClusterRoleBinding'
|
||||
tags:
|
||||
- upgrade
|
||||
|
||||
- name: Kubernetes Apps | Lay down dashboard template
|
||||
template:
|
||||
src: "{{item.file}}"
|
||||
dest: "{{kube_config_dir}}/{{item.file}}"
|
||||
with_items:
|
||||
- {file: dashboard.yml.j2, type: deploy, name: netchecker-agent}
|
||||
- {file: dashboard.yml.j2, type: deploy, name: kubernetes-dashboard}
|
||||
register: manifests
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Wait for kube-apiserver
|
||||
uri:
|
||||
url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
|
||||
url: "{{ kube_apiserver_endpoint }}/healthz"
|
||||
validate_certs: no
|
||||
client_cert: "{{ kube_apiserver_client_cert }}"
|
||||
client_key: "{{ kube_apiserver_client_key }}"
|
||||
register: result
|
||||
until: result.status == 200
|
||||
retries: 10
|
||||
@@ -15,7 +18,9 @@
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{ item }}"
|
||||
state: absent
|
||||
with_items: ['deploy', 'svc']
|
||||
with_items:
|
||||
- 'deploy'
|
||||
- 'svc'
|
||||
tags:
|
||||
- upgrade
|
||||
|
||||
@@ -86,13 +91,13 @@
|
||||
- dnsmasq
|
||||
|
||||
- name: Kubernetes Apps | Netchecker
|
||||
include: tasks/netchecker.yml
|
||||
import_tasks: tasks/netchecker.yml
|
||||
when: deploy_netchecker
|
||||
tags:
|
||||
- netchecker
|
||||
|
||||
- name: Kubernetes Apps | Dashboard
|
||||
include: tasks/dashboard.yml
|
||||
import_tasks: tasks/dashboard.yml
|
||||
when: dashboard_enabled
|
||||
tags:
|
||||
- dashboard
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 Google Inc. All Rights Reserved.
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,12 +12,25 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Configuration to deploy head version of the Dashboard UI compatible with
|
||||
# Kubernetes 1.6 (RBAC enabled).
|
||||
# Configuration to deploy release version of the Dashboard UI compatible with
|
||||
# Kubernetes 1.8.
|
||||
#
|
||||
# Example usage: kubectl create -f <this_file>
|
||||
|
||||
{% if rbac_enabled %}
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: {{ system_namespace }}
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service Account ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
@@ -25,25 +38,92 @@ metadata:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: {{ system_namespace }}
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: {{ system_namespace }}
|
||||
rules:
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics from heapster.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: {{ system_namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
kind: Role
|
||||
name: kubernetes-dashboard-minimal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: {{ system_namespace }}
|
||||
{% endif %}
|
||||
|
||||
---
|
||||
# ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
|
||||
# Allows users to reach login page and other proxied dashboard URLs
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard-anonymous
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["https:kubernetes-dashboard:"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard-anonymous
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard-anonymous
|
||||
subjects:
|
||||
- kind: User
|
||||
name: system:anonymous
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1beta2
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
@@ -63,8 +143,7 @@ spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
|
||||
# Image is tagged and updated with :head, so always pull it.
|
||||
imagePullPolicy: Always
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
resources:
|
||||
limits:
|
||||
cpu: {{ dashboard_cpu_limit }}
|
||||
@@ -73,27 +152,48 @@ spec:
|
||||
cpu: {{ dashboard_cpu_requests }}
|
||||
memory: {{ dashboard_memory_requests }}
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
{% if dashboard_use_custom_certs %}
|
||||
- --tls-key-file={{ dashboard_tls_key_file }}
|
||||
- --tls-cert-file={{ dashboard_tls_cert_file }}
|
||||
{% else %}
|
||||
- --auto-generate-certificates
|
||||
{% endif %}
|
||||
- --authentication-mode=token{% if kube_basic_auth|default(false) %},basic{% endif %}
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 9090
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
{% if rbac_enabled %}
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: {{ dashboard_certs_secret_name }}
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
{% endif %}
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service ------------------- #
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
@@ -103,8 +203,7 @@ metadata:
|
||||
namespace: {{ system_namespace }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 9090
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Wait for kube-apiserver
|
||||
uri:
|
||||
url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
|
||||
url: "{{ kube_apiserver_endpoint }}/healthz"
|
||||
validate_certs: no
|
||||
client_cert: "{{ kube_apiserver_client_cert }}"
|
||||
client_key: "{{ kube_apiserver_client_key }}"
|
||||
register: result
|
||||
until: result.status == 200
|
||||
retries: 10
|
||||
|
||||
@@ -39,8 +39,8 @@ spec:
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
- name: dockercontainers
|
||||
mountPath: "{{ docker_daemon_graph }}/containers"
|
||||
readOnly: true
|
||||
- name: config
|
||||
mountPath: "{{ fluentd_config_dir }}"
|
||||
@@ -49,9 +49,9 @@ spec:
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: varlibdockercontainers
|
||||
- name: dockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
path: {{ docker_daemon_graph }}/containers
|
||||
- name: config
|
||||
configMap:
|
||||
name: fluentd-config
|
||||
|
||||
@@ -3,3 +3,18 @@ helm_enabled: false
|
||||
|
||||
# specify a dir and attach it to helm for HELM_HOME.
|
||||
helm_home_dir: "/root/.helm"
|
||||
|
||||
# Deployment mode: host or docker
|
||||
helm_deployment_type: host
|
||||
|
||||
# Do not download the local repository cache on helm init
|
||||
helm_skip_refresh: false
|
||||
|
||||
# Set URL for stable repository
|
||||
# helm_stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
|
||||
|
||||
# Set node selector options for Tiller Deployment manifest.
|
||||
# tiller_node_selectors: "key1=val1,key2=val2"
|
||||
|
||||
# Override values for the Tiller Deployment manifest.
|
||||
# tiller_override: "key1=val1,key2=val2"
|
||||
|
||||
roles/kubernetes-apps/helm/tasks/install_docker.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
---
|
||||
- name: Helm | Set up helm docker launcher
|
||||
template:
|
||||
src: helm-container.j2
|
||||
dest: "{{ bin_dir }}/helm"
|
||||
owner: root
|
||||
mode: 0755
|
||||
register: helm_container
|
||||
roles/kubernetes-apps/helm/tasks/install_host.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
---
|
||||
- name: Helm | Compare host helm with hyperkube container
|
||||
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir --entrypoint /usr/bin/cmp {{ helm_image_repo }}:{{ helm_image_tag }} /usr/local/bin/helm /systembindir/helm"
|
||||
register: helm_task_compare_result
|
||||
until: helm_task_compare_result.rc in [0,1,2]
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
failed_when: "helm_task_compare_result.rc not in [0,1,2]"
|
||||
|
||||
- name: Helm | Copy helm from helm container
|
||||
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir --entrypoint /bin/cp {{ helm_image_repo }}:{{ helm_image_tag }} -f /usr/local/bin/helm /systembindir/helm"
|
||||
when: helm_task_compare_result.rc != 0
|
||||
register: helm_task_result
|
||||
until: helm_task_result.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
|
||||
- name: Helm | Copy socat wrapper for Container Linux
|
||||
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
|
||||
args:
|
||||
creates: "{{ bin_dir }}/socat"
|
||||
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
|
||||
@@ -3,12 +3,7 @@
|
||||
file: path={{ helm_home_dir }} state=directory
|
||||
|
||||
- name: Helm | Set up helm launcher
|
||||
template:
|
||||
src: helm-container.j2
|
||||
dest: "{{ bin_dir }}/helm"
|
||||
owner: root
|
||||
mode: 0755
|
||||
register: helm_container
|
||||
include_tasks: "install_{{ helm_deployment_type }}.yml"
|
||||
|
||||
- name: Helm | Lay Down Helm Manifests (RBAC)
|
||||
template:
|
||||
@@ -32,13 +27,15 @@
|
||||
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
|
||||
|
||||
- name: Helm | Install/upgrade helm
|
||||
command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
|
||||
when: helm_container.changed
|
||||
|
||||
- name: Helm | Patch tiller deployment for RBAC
|
||||
command: "{{bin_dir}}/kubectl patch deployment tiller-deploy -p '{\"spec\":{\"template\":{\"spec\":{\"serviceAccount\":\"tiller\"}}}}' -n {{ system_namespace }}"
|
||||
when: rbac_enabled
|
||||
command: >
|
||||
{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
|
||||
{% if helm_skip_refresh %} --skip-refresh{% endif %}
|
||||
{% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
|
||||
{% if rbac_enabled %} --service-account=tiller{% endif %}
|
||||
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
|
||||
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
|
||||
when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
|
||||
|
||||
- name: Helm | Set up bash completion
|
||||
shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
|
||||
when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
|
||||
when: ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)) and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
|
||||
|
||||
@@ -6,6 +6,11 @@
|
||||
-v {{ helm_home_dir }}:{{ helm_home_dir }}:rw \
|
||||
{% for dir in ssl_ca_dirs -%}
|
||||
-v {{ dir }}:{{ dir }}:ro \
|
||||
{% endfor -%}
|
||||
{% endfor -%}
|
||||
{% if http_proxy is defined or https_proxy is defined -%}
|
||||
-e http_proxy="{{proxy_env.http_proxy}}" \
|
||||
-e https_proxy="{{proxy_env.https_proxy}}" \
|
||||
-e no_proxy="{{proxy_env.no_proxy}}" \
|
||||
{% endif -%}
|
||||
{{ helm_image_repo }}:{{ helm_image_tag}} \
|
||||
"$@"
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap
|
||||
local_volume_provisioner_bootstrap_image_tag: v1.0.0
|
||||
|
||||
local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
|
||||
local_volume_provisioner_image_tag: v1.0.0
|
||||
@@ -0,0 +1,42 @@
|
||||
---
|
||||
- name: Local Volume Provisioner | Ensure base dir is created on all hosts
|
||||
file:
|
||||
path: "{{ local_volume_base_dir }}"
|
||||
ensure: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0700
|
||||
delegate_to: "{{ item }}"
|
||||
with_items: "{{ groups['k8s-cluster'] }}"
|
||||
failed_when: false
|
||||
|
||||
- name: Local Volume Provisioner | Create addon dir
|
||||
file:
|
||||
path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0755
|
||||
recurse: true
|
||||
|
||||
- name: Local Volume Provisioner | Create manifests
|
||||
template:
|
||||
src: "{{item.file}}.j2"
|
||||
dest: "{{kube_config_dir}}/addons/local_volume_provisioner/{{item.file}}"
|
||||
with_items:
|
||||
- {name: local-storage-provisioner-pv-binding, file: provisioner-admin-account.yml, type: clusterrolebinding}
|
||||
- {name: local-volume-config, file: volume-config.yml, type: configmap}
|
||||
- {name: local-volume-provisioner, file: provisioner-ds.yml, type: daemonset}
|
||||
register: local_volume_manifests
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
|
||||
- name: Local Volume Provisioner | Apply manifests
|
||||
kube:
|
||||
name: "{{item.item.name}}"
|
||||
namespace: "{{ system_namespace }}"
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{item.item.type}}"
|
||||
filename: "{{kube_config_dir}}/addons/local_volume_provisioner/{{item.item.file}}"
|
||||
state: "latest"
|
||||
with_items: "{{ local_volume_manifests.results }}"
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: local-storage-admin
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: local-storage-provisioner-pv-binding
|
||||
namespace: {{ system_namespace }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: local-storage-admin
|
||||
namespace: {{ system_namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:persistent-volume-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: local-storage-provisioner-node-binding
|
||||
namespace: {{ system_namespace }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: local-storage-admin
|
||||
namespace: {{ system_namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:node
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: local-volume-provisioner
|
||||
namespace: "{{ system_namespace }}"
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: local-volume-provisioner
|
||||
spec:
|
||||
containers:
|
||||
- name: provisioner
|
||||
image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
|
||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: discovery-vol
|
||||
mountPath: "/local-disks"
|
||||
- name: local-volume-config
|
||||
mountPath: /etc/provisioner/config/
|
||||
env:
|
||||
- name: MY_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: MY_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
volumes:
|
||||
- name: discovery-vol
|
||||
hostPath:
|
||||
path: "{{ local_volume_base_dir }}"
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
name: local-volume-config
|
||||
name: local-volume-config
|
||||
serviceAccount: local-storage-admin
|
||||
@@ -0,0 +1,12 @@
|
||||
# The config map is used to configure local volume discovery for Local SSDs on GCE and GKE.
|
||||
# It is a map from storage class to its mount configuration.
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: local-volume-config
|
||||
namespace: {{ system_namespace }}
|
||||
data:
|
||||
storageClassMap: |
|
||||
local-storage:
|
||||
hostDir: "{{ local_volume_base_dir }}"
|
||||
mountDir: "/mnt/local-storage/"
|
||||
@@ -20,8 +20,21 @@ dependencies:
    tags:
      - apps
      - helm
  - role: kubernetes-apps/local_volume_provisioner
    when: local_volumes_enabled
    tags:
      - apps
      - local_volume_provisioner
      - storage
  # istio role should be last because it takes a long time to initialize and
  # will cause timeouts trying to start other addons.
  - role: kubernetes-apps/istio
    when: istio_enabled
    tags:
      - apps
      - istio
  - role: kubernetes-apps/persistent_volumes
    when: persistent_volumes_enabled
    tags:
      - apps
      - persistent_volumes
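The new role is gated on local_volumes_enabled, in the same way istio_enabled and persistent_volumes_enabled gate their roles. A minimal sketch of turning it on from inventory group vars; the file path and the base-dir value are assumptions, only the variable names come from the roles above:

# e.g. inventory/group_vars/k8s-cluster.yml (path is illustrative)
local_volumes_enabled: true
local_volume_base_dir: /mnt/disks     # assumed example; check the role defaults for the real default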
@@ -0,0 +1,72 @@
---

- name: Contiv | Wait for netmaster
  uri:
    url: "http://127.0.0.1:{{ contiv_netmaster_port }}/info"
  register: result
  until: result.status is defined and result.status == 200
  retries: 10
  delay: 5

- name: Contiv | Get global configuration
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global info --json --all
  register: global_config
  run_once: true
  changed_when: false

- set_fact:
    contiv_global_config: "{{ (global_config.stdout|from_json)[0] }}"

- name: Contiv | Set global forwarding mode
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global set --fwd-mode={{ contiv_fwd_mode }}
  when: "contiv_global_config.get('fwdMode', '') != contiv_fwd_mode"
  run_once: true

- name: Contiv | Set global fabric mode
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global set --fabric-mode={{ contiv_fabric_mode }}
  when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
  run_once: true

- name: Contiv | Get existing networks
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      net ls -q
  register: net_result
  run_once: true
  changed_when: false

- name: Contiv | Create networks
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      net create \
        --encap={{ item.encap|default("vxlan") }} \
        --gateway={{ item.gateway }} \
        --nw-type={{ item.nw_type|default("data") }} \
        --pkt-tag={{ item.pkt_tag|default("0") }} \
        --subnet={{ item.subnet }} \
        --tenant={{ item.tenant|default("default") }} \
        "{{ item.name }}"
  with_items: "{{ contiv_networks }}"
  when: item['name'] not in net_result.stdout_lines
  run_once: true

- name: Contiv | Check if default group exists
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      group ls -q
  register: group_result
  run_once: true
  changed_when: false

- name: Contiv | Create default group
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      group create default-net default
  when: "'default' not in group_result.stdout_lines"
  run_once: true
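The "Contiv | Create networks" task above loops over contiv_networks; only name, subnet and gateway are mandatory, while the other flags fall back to defaults via the |default() filters. A sketch of what that variable might look like in group vars; the keys mirror the item.* fields used by the task, the concrete addresses are illustrative:

contiv_networks:
  - name: default-net
    subnet: "10.233.64.0/18"      # illustrative CIDR
    gateway: "10.233.64.1"
    encap: vxlan                  # optional, defaults to vxlan
    nw_type: data                 # optional, defaults to data
    pkt_tag: "0"                  # optional, defaults to 0
    tenant: default               # optional, defaults to default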
15  roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml  Normal file
@@ -0,0 +1,15 @@
---

- name: Contiv | Create Kubernetes resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ contiv_manifests_results.results }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- import_tasks: configure.yml
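One note on the state expression above: the manifests were rendered in a registered loop, so each result carries a changed flag, and the ternary filter re-applies only what actually changed. Roughly, as a sketch of how it resolves (the latest/present semantics are those of the kube module as used elsewhere in this diff):

# state: "{{ item.changed | ternary('latest','present') }}"
#   item.changed == true   ->  state: latest    # manifest was re-rendered, force a re-apply
#   item.changed == false  ->  state: present   # manifest unchanged, just ensure it exists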
@@ -1,19 +1,14 @@
 ---
-- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml"
-  run_once: true
-  when: rbac_enabled and flannel_rbac_manifest.changed
-
 - name: Flannel | Start Resources
   kube:
-    name: "kube-flannel"
-    kubectl: "{{ bin_dir }}/kubectl"
-    filename: "{{ kube_config_dir }}/cni-flannel.yml"
-    resource: "ds"
-    namespace: "{{system_namespace}}"
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
-  with_items: "{{ flannel_manifest.changed }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  with_items: "{{ flannel_node_manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0] and not item|skipped

 - name: Flannel | Wait for flannel subnet.env file presence
   wait_for:
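The new with_items / when pair follows the same pattern as the other plugins in this diff: flannel_node_manifests is a registered template loop, and entries that were skipped on a host carry skipped: true, which is what the not item|skipped guard filters out. A sketch of such a skipped entry, assuming standard Ansible loop-result fields (the item names are taken from the old task above, purely for illustration):

# Hypothetical skipped entry in flannel_node_manifests.results:
- skipped: true          # task was skipped for this item, so there is nothing to apply
  item:
    name: kube-flannel
    file: cni-flannel.yml
    type: ds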
Some files were not shown because too many files have changed in this diff.