Mirror of https://github.com/kubernetes-sigs/kubespray.git
Synced 2025-12-14 13:54:37 +03:00

Compare commits (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6f97687d19 | |
| | 447605ca0e | |
| | 3901480bc1 | |
| | c42cb8f9b2 | |
| | 5c28bb0679 | |
| | 6d53229986 | |
| | 1e57d2e21a | |
| | ea41fc5e74 | |
| | 4167807f17 | |
| | 2ac1c7562f | |
| | 2d6e31d281 | |
| | 0a19d1bf01 | |
.gitignore (vendored): 11 changes
@@ -1,6 +1,9 @@
.vagrant
*.retry
**/vagrant_ansible_inventory
inventory/credentials/
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
temp
.idea
.tox
@@ -8,19 +11,12 @@ temp
*.bak
*.tfstate
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/

# Ansible inventory
inventory/*
!inventory/local
!inventory/sample
inventory/*/artifacts/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -28,6 +24,7 @@ __pycache__/

# Distribution / packaging
.Python
inventory/*/artifacts/
env/
build/
credentials/
.gitlab-ci.yml: 164 changes
@@ -1,4 +1,3 @@
---
stages:
  - unit-tests
  - moderator
@@ -9,7 +8,7 @@ stages:
variables:
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  # DOCKER_HOST: tcp://localhost:2375
  # DOCKER_HOST: tcp://localhost:2375
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
@@ -25,6 +24,7 @@ variables:
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  UPGRADE_TEST: "false"
  KUBEADM_ENABLED: "false"
  LOG_LEVEL: "-vv"

  # asia-east1-a
@@ -42,7 +42,7 @@ before_script:
  tags:
    - kubernetes
    - docker
  image: quay.io/kubespray/kubespray:v2.8
  image: quay.io/kubespray/kubespray:v2.7

.docker_service: &docker_service
  services:
@@ -91,7 +91,9 @@ before_script:
    - cd tests && make create-${CI_PLATFORM} -s ; cd -

    # Check out latest tag if testing upgrade
    - test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
    # Uncomment when gitlab kubespray repo has tags
    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
    - test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
    # Checkout the CI vars file so it is available
    - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
    # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
@@ -135,7 +137,9 @@ before_script:

    # Tests Cases
    ## Test Master API
    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
    - >
      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"

    ## Ping the between 2 pod
    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
@@ -233,95 +237,95 @@ before_script:

# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
  # stage: deploy-part1
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
  # stage: deploy-part1
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
  # stage: deploy-part1
  # stage: deploy-part1
  UPGRADE_TEST: "graceful"

.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
  # stage: deploy-part1
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.coreos_cilium_variables: &coreos_cilium_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.rhel7_weave_variables: &rhel7_weave_variables
  # stage: deploy-part1
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
  # stage: deploy-part2
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.debian9_calico_variables: &debian9_calico_variables
  # stage: deploy-part2
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.coreos_canal_variables: &coreos_canal_variables
  # stage: deploy-part2
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.centos7_calico_ha_variables: &centos7_calico_ha_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.centos7_kube_router_variables: &centos7_kube_router_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.centos7_multus_calico_variables: &centos7_multus_calico_variables
  # stage: deploy-part2
  # stage: deploy-part2
  UPGRADE_TEST: "graceful"

.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.coreos_kube_router_variables: &coreos_kube_router_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
  # stage: deploy-part1
  # stage: deploy-part1
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_flannel_variables: &ubuntu_flannel_variables
  # stage: deploy-part2
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
  # stage: deploy-special
  # stage: deploy-special
  MOVED_TO_GROUP_VARS: "true"

.opensuse_canal_variables: &opensuse_canal_variables
  # stage: deploy-part2
  # stage: deploy-part2
  MOVED_TO_GROUP_VARS: "true"

@@ -363,8 +367,6 @@ gce_centos7-flannel-addons:
  except: ['triggers']
  only: [/^pr-.*$/]

### MANUAL JOBS

gce_centos-weave-kubeadm-sep:
  stage: deploy-part2
  <<: *job
@@ -373,7 +375,21 @@ gce_centos-weave-kubeadm-sep:
    <<: *gce_variables
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: ['triggers']
  only: [/^pr-.*$/]

gce_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_flannel_variables
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

### MANUAL JOBS

gce_ubuntu-weave-sep:
  stage: deploy-part2
@@ -383,7 +399,8 @@ gce_ubuntu-weave-sep:
    <<: *gce_variables
    <<: *ubuntu_weave_sep_variables
  when: manual
  only: ['triggers']
  except: ['triggers']
  only: [/^pr-.*$/]

gce_coreos-calico-sep-triggers:
  stage: deploy-part2
@@ -415,6 +432,7 @@ gce_centos7-flannel-addons-triggers:
  when: on_success
  only: ['triggers']


gce_ubuntu-weave-sep-triggers:
  stage: deploy-part2
  <<: *job
@@ -468,16 +486,6 @@ gce_ubuntu-canal-kubeadm-triggers:
  when: on_success
  only: ['triggers']

gce_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_flannel_variables
  when: manual
  except: ['triggers']

gce_centos-weave-kubeadm-triggers:
  stage: deploy-part2
  <<: *job
@@ -728,7 +736,7 @@ yamllint:
  <<: *job
  stage: unit-tests
  script:
    - yamllint .
    - yamllint roles
  except: ['triggers', 'master']

tox-inventory-builder:
@@ -739,73 +747,3 @@ tox-inventory-builder:
    - cd contrib/inventory_builder && tox
  when: manual
  except: ['triggers', 'master']


# Tests for contrib/terraform/
.terraform_install: &terraform_install
  <<: *job
  before_script:
    # Set Ansible config
    - cp ansible.cfg ~/.ansible.cfg
    # Install Terraform
    - apt-get install -y unzip
    - curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip
    - unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version
    # Prepare inventory
    - cp -LRp contrib/terraform/$PROVIDER/sample-inventory inventory/$CLUSTER
    - cd inventory/$CLUSTER
    - ln -s ../../contrib/terraform/$PROVIDER/hosts
    - terraform init ../../contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
    - export TF_VAR_public_key_path=""
  only: ['master', /^pr-.*$/]

.terraform_validate: &terraform_validate
  <<: *terraform_install
  stage: unit-tests
  script:
    - terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
    - terraform fmt -check -diff ../../contrib/terraform/$PROVIDER

.terraform_apply: &terraform_apply
  <<: *terraform_install
  stage: deploy-part2
  when: manual
  script:
    - terraform apply -auto-approve ../../contrib/terraform/$PROVIDER
    - ansible-playbook -i hosts ../../cluster.yml
  after_script:
    # Cleanup regardless of exit code
    - cd inventory/$CLUSTER
    - terraform destroy -auto-approve ../../contrib/terraform/$PROVIDER

tf-validate-openstack:
  <<: *terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-packet:
  <<: *terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-apply-packet:
  <<: *terraform_apply
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_cluster_name: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: "ewr1"
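The `.terraform_install` anchor above is plain shell executed inside the CI image; a rough local equivalent of the validate path, assuming `TF_VERSION`, `PROVIDER` and `CLUSTER` are exported the way the `tf-validate-*` jobs set them, is:

```
# Sketch of what tf-validate-* does, run from a kubespray checkout.
# TF_VERSION, PROVIDER and CLUSTER mirror the job variables above.
curl "https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip" > /tmp/terraform.zip
unzip /tmp/terraform.zip && sudo mv ./terraform /usr/local/bin/ && terraform --version
cp -LRp "contrib/terraform/${PROVIDER}/sample-inventory" "inventory/${CLUSTER}"
cd "inventory/${CLUSTER}"
terraform init "../../contrib/terraform/${PROVIDER}"
terraform validate -var-file=cluster.tf "../../contrib/terraform/${PROVIDER}"
```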
@@ -7,5 +7,4 @@
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Sign the CNCF CLA (https://git.k8s.io/community/CLA.md#the-contributor-license-agreement)
5. Submit a pull request.
4. Submit a pull request.
@@ -4,7 +4,7 @@ RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python-dev sshpass apt-transport-https jq \
    libssl-dev python-dev sshpass apt-transport-https \
    ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
@@ -14,6 +14,3 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.11.3/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
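This Dockerfile produces the image CI runs in; a hedged local smoke test (the tag is illustrative, not one the project publishes):

```
# Build the CI image from the repo root and check the bundled kubectl.
docker build -t kubespray-ci:local .
docker run --rm kubespray-ci:local kubectl version --client
```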
OWNERS: 3 changes
@@ -1,4 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
# See the OWNERS file documentation:
# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md

approvers:
  - kubespray-approvers
@@ -11,10 +11,8 @@ aliases:
    - riverzhang
    - holser
    - smana
    - verwilst
  kubespray-reviewers:
    - jjungnickel
    - archifleks
    - chapsuk
    - mirwan
    - miouge1
README.md: 39 changes
@@ -3,10 +3,10 @@
Deploy a Production Ready Kubernetes Cluster
============================================

If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -17,8 +17,15 @@ Quick Start

To deploy the cluster you can use :

### Current release
2.8.2

### Ansible

#### Ansible version

Ansible v2.7.0 is failing and/or produce unexpected results due to [ansible/ansible/issues/46600](https://github.com/ansible/ansible/issues/46600)

#### Usage

    # Install dependencies from ``requirements.txt``
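The Usage block is cut off in this capture; a minimal sketch of the install-and-check step it introduces, assuming a POSIX shell at the repo root, is:

```
# Install the pinned dependencies, then confirm the installed Ansible
# satisfies the constraints above (>= 2.7.6 on one side of this compare,
# and never 2.7.0).
sudo pip install -r requirements.txt
ansible --version
```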
@@ -86,7 +93,6 @@ Documents
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md)
- [Upgrades basics](docs/upgrades.md)
- [Roadmap](docs/roadmap.md)
@@ -108,27 +114,27 @@ Supported Components
--------------------

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.5
  - [etcd](https://github.com/coreos/etcd) v3.2.26
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.12.7
  - [etcd](https://github.com/coreos/etcd) v3.2.24
  - [docker](https://www.docker.com/) v18.06 (see note)
  - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
  - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin
  - [calico](https://github.com/projectcalico/calico) v3.4.0
  - [calico](https://github.com/projectcalico/calico) v3.1.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.3.0
  - [contiv](https://github.com/contiv/install) v1.2.1
  - [flanneld](https://github.com/coreos/flannel) v0.11.0
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
  - [flanneld](https://github.com/coreos/flannel) v0.10.0
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.1
  - [multus](https://github.com/intel/multus-cni) v3.1.autoconf
  - [weave](https://github.com/weaveworks/weave) v2.5.1
  - [weave](https://github.com/weaveworks/weave) v2.5.0
- Application
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
  - [coredns](https://github.com/coredns/coredns) v1.4.0
  - [coredns](https://github.com/coredns/coredns) v1.2.6
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0

Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
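The note above names the pinning mechanisms without showing them; one hedged way to apply them (package names are the common ones, verify for your distro):

```
# Debian/Ubuntu: hold the docker package at its current version.
sudo apt-mark hold docker-ce
# RHEL/CentOS: lock it via the versionlock plugin.
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce
```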

Note 2: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
@@ -138,7 +144,7 @@ plugins can be deployed for a given single cluster.
Requirements
------------

- **Ansible v2.7.6 (or newer) and python-netaddr is installed on the machine
- **Ansible v2.5 (or newer) and python-netaddr is installed on the machine
  that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -150,14 +156,6 @@ Requirements
  should be configured in the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified.

Hardware:
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
- Node
  - Memory: 1024 MB

Network Plugins
---------------

@@ -200,6 +198,7 @@ Tools and projects on top of Kubespray
--------------------------------------

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)

CI Tests
Vagrantfile (vendored): 28 changes
@@ -23,7 +23,7 @@ SUPPORTED_OS = {
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
  "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
}

@@ -50,10 +50,6 @@ $kube_node_instances = $num_instances
$kube_node_instances_with_disks = false
$kube_node_instances_with_disks_size = "20G"
$kube_node_instances_with_disks_number = 2
$override_disk_size = false
$disk_size = "20GB"
$local_path_provisioner_enabled = false
$local_path_provisioner_claim_root = "/opt/local-path-provisioner/"

$playbook = "cluster.yml"

@@ -101,13 +97,6 @@ Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false

  if ($override_disk_size)
    unless Vagrant.has_plugin?("vagrant-disksize")
      system "vagrant plugin install vagrant-disksize"
    end
    config.disksize.size = $disk_size
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|

@@ -131,7 +120,6 @@ Vagrant.configure("2") do |config|
      vb.cpus = $vm_cpus
      vb.gui = $vm_gui
      vb.linked_clone = true
      vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
    end

    node.vm.provider :libvirt do |lv|
@@ -177,28 +165,24 @@ Vagrant.configure("2") do |config|

    host_vars[vm_name] = {
      "ip": ip,
      "flannel_interface": "eth1",
      "kube_network_plugin": $network_plugin,
      "kube_network_plugin_multus": $multi_networking,
      "docker_keepcache": "1",
      "download_run_once": "False",
      "download_localhost": "False",
      "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
      "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}"
      "download_run_once": "True",
      "download_localhost": "False"
    }

    # Only execute the Ansible provisioner once, when all the machines are up and ready.
    if i == $num_instances
      node.vm.provision "ansible" do |ansible|
        ansible.playbook = $playbook
        $ansible_inventory_path = File.join( $inventory, "hosts.ini")
        if File.exist?($ansible_inventory_path)
          ansible.inventory_path = $ansible_inventory_path
        if File.exist?(File.join( $inventory, "hosts.ini"))
          ansible.inventory_path = $inventory
        end
        ansible.become = true
        ansible.limit = "all"
        ansible.host_key_checking = false
        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "--ask-become-pass"]
        ansible.host_vars = host_vars
        #ansible.tags = ['download']
        ansible.groups = {

@@ -1,2 +0,0 @@
---
theme: jekyll-theme-slate
cluster.yml: 41 changes
@@ -1,18 +1,32 @@
---
- hosts: localhost
  gather_facts: false
  become: no
  tasks:
    - name: "Check ansible version >=2.7.6"
    - name: "Check ansible version !=2.7.0"
      assert:
        msg: "Ansible must be v2.7.6 or higher"
        msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed"
        that:
          - ansible_version.string is version("2.7.6", ">=")
          - ansible_version.string is version("2.7.0", "!=")
          - ansible_version.string is version("2.5.0", ">=")
      tags:
        - check
  vars:
    ansible_connection: local
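Since the version assert is tagged `check`, it can be exercised on its own; a hedged invocation (the inventory path is illustrative):

```
# Run only the tagged Ansible version check from cluster.yml.
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags check
```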

- hosts: localhost
  gather_facts: false
  tasks:
    - name: deploy warning for non kubeadm
      debug:
        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
      when: not kubeadm_enabled and not skip_non_kubeadm_warning

    - name: deploy cluster for non kubeadm
      pause:
        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
        echo: no
      when: not kubeadm_enabled and not skip_non_kubeadm_warning

- hosts: bastion[0]
  gather_facts: False
  roles:
@@ -34,14 +48,13 @@
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  vars:
    ansible_ssh_pipelining: true
  gather_facts: false
  gather_facts: true
  pre_tasks:
    - name: gather facts from all instances
      setup:
      delegate_to: "{{item}}"
      delegate_facts: true
      delegate_facts: True
      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
      run_once: true

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -83,7 +96,7 @@
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
    - { role: network_plugin, tags: network }

- hosts: kube-master[0]
@@ -91,7 +104,7 @@
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -108,10 +121,16 @@
    - { role: kubespray-defaults}
    - { role: network_plugin/calico/rr, tags: network }

- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
  environment: "{{proxy_env}}"

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes-apps, tags: apps }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
  environment: "{{proxy_env}}"
@@ -1,4 +1,3 @@
---
apiVersion: "2015-06-15"

virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
@@ -35,3 +34,4 @@ imageReferenceJson: "{{imageReference|to_json}}"

storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"

@@ -1,4 +1,3 @@
---
- set_fact:
    base_dir: "{{playbook_dir}}/.generated/"

@@ -1,3 +1,2 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian

@@ -1,4 +1,3 @@
---
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"

@@ -1,7 +1,7 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
kubeadm_enabled: true

kubelet_fail_swap_on: false

@@ -1,4 +1,3 @@
---
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"
@@ -56,7 +55,7 @@
  user:
    name: "{{ distro_user }}"
    uid: 1000
    # groups: sudo
    #groups: sudo
    append: yes

- name: Allow password-less sudo to "{{ distro_user }}"

@@ -1,4 +1,3 @@
---
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"
@@ -19,7 +18,7 @@
    state: started
    hostname: "{{ item }}"
    command: "{{ distro_init }}"
    # recreate: yes
    #recreate: yes
    privileged: true
    tmpfs:
      - /sys/module/nf_conntrack/parameters

@@ -1,6 +1,8 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
)

@@ -1,8 +1,8 @@
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico {}'
  'kube_network_plugin=canal {}'
  'kube_network_plugin=cilium {}'
  'kube_network_plugin=flannel {}'
  'kube_network_plugin=weave {}'
  'kube_network_plugin=calico {"kubeadm_enabled":true}'
  'kube_network_plugin=canal {"kubeadm_enabled":true}'
  'kube_network_plugin=cilium {"kubeadm_enabled":true}'
  'kube_network_plugin=flannel {"kubeadm_enabled":true}'
  'kube_network_plugin=weave {"kubeadm_enabled":true}'
)
@@ -17,9 +17,6 @@
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip:
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
@@ -34,21 +31,21 @@
#     ip: X.X.X.X

from collections import OrderedDict
from ipaddress import ip_address
from ruamel.yaml import YAML
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

import os
import re
import sys

ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
         'calico-rr']
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
         'calico-rr', 'vault']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)


def get_var_as_bool(name, default):
@@ -57,8 +54,7 @@ def get_var_as_bool(name, default):

# Configurable as shell vars start

CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.ini")
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
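One side of this compare reads and writes a YAML inventory (`hosts.yaml` via ruamel.yaml), the other the old INI format (`hosts.ini` via configparser). A hedged usage sketch of the builder on the YAML side (paths and addresses are illustrative):

```
# Generate an inventory for three nodes, then dump the resulting config.
# On the YAML side this emits nested mappings under "all:" rather than
# INI sections, roughly:
#   all:
#     hosts:
#       node1: {ansible_host: 10.10.1.3, ip: 10.10.1.3, access_ip: 10.10.1.3}
#     children:
#       k8s-cluster: {children: {kube-master: ..., kube-node: ...}}
CONFIG_FILE=inventory/sample/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
CONFIG_FILE=inventory/sample/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py print_cfg
```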
@@ -72,14 +68,11 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
class KubesprayInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config = configparser.ConfigParser(allow_no_value=True,
                                                delimiters=('\t', ' '))
        self.config_file = config_file
        self.yaml_config = {}
        if self.config_file:
            try:
                self.hosts_file = open(config_file, 'r')
                self.yaml_config = yaml.load(self.hosts_file)
            except FileNotFoundError:
                pass
            self.config.read(self.config_file)

        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            self.parse_command(changed_hosts[0], changed_hosts[1:])
@@ -88,7 +81,6 @@ class KubesprayInventory(object):
            self.ensure_required_groups(ROLES)

            if changed_hosts:
                changed_hosts = self.range2ips(changed_hosts)
                self.hosts = self.build_hostnames(changed_hosts)
                self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
                self.set_all(self.hosts)
@@ -109,9 +101,8 @@ class KubesprayInventory(object):

    def write_config(self, config_file):
        if config_file:
            with open(self.config_file, 'w') as f:
                yaml.dump(self.yaml_config, f)

            with open(config_file, 'w') as f:
                self.config.write(f)
        else:
            print("WARNING: Unable to save config. Make sure you set "
                  "CONFIG_FILE env var.")
@@ -121,29 +112,28 @@ class KubesprayInventory(object):
        print("DEBUG: {0}".format(msg))

    def get_ip_from_opts(self, optstring):
        if 'ip' in optstring:
            return optstring['ip']
        else:
        opts = optstring.split(' ')
        for opt in opts:
            if '=' not in opt:
                continue
            k, v = opt.split('=')
            if k == "ip":
                return v
            raise ValueError("IP parameter not found in options")

    def ensure_required_groups(self, groups):
        for group in groups:
            if group == 'all':
            try:
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config:
                    all_dict = OrderedDict([('hosts', OrderedDict({})),
                                            ('children', OrderedDict({}))])
                    self.yaml_config = {'all': all_dict}
            else:
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config['all']['children']:
                    self.yaml_config['all']['children'][group] = {'hosts': {}}
                self.config.add_section(group)
            except configparser.DuplicateSectionError:
                pass

    def get_host_id(self, host):
        '''Returns integer host ID (without padding) from a given hostname.'''
        try:
            short_hostname = host.split('.')[0]
            return int(re.findall("\\d+$", short_hostname)[-1])
            return int(re.findall("\d+$", short_hostname)[-1])
        except IndexError:
            raise ValueError("Host name must end in an integer")

@@ -151,12 +141,12 @@ class KubesprayInventory(object):
        existing_hosts = OrderedDict()
        highest_host_id = 0
        try:
            for host in self.yaml_config['all']['hosts']:
                existing_hosts[host] = self.yaml_config['all']['hosts'][host]
            for host, opts in self.config.items('all'):
                existing_hosts[host] = opts
                host_id = self.get_host_id(host)
                if host_id > highest_host_id:
                    highest_host_id = host_id
        except Exception:
        except configparser.NoSectionError:
            pass

        # FIXME(mattymo): Fix condition where delete then add reuses highest id
@@ -173,53 +163,22 @@ class KubesprayInventory(object):
                self.debug("Marked {0} for deletion.".format(realhost))
                self.delete_host_by_ip(all_hosts, realhost)
            elif host[0].isdigit():
                if ',' in host:
                    ip, access_ip = host.split(',')
                else:
                    ip = host
                    access_ip = host
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, ip):
                    self.debug("Skipping existing host {0}.".format(ip))
                elif self.exists_ip(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue

                next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
                next_host_id += 1
                all_hosts[next_host] = {'ansible_host': access_ip,
                                        'ip': ip,
                                        'access_ip': access_ip}
                all_hosts[next_host] = "ansible_host={0} ip={1}".format(
                    host, host)
            elif host[0].isalpha():
                raise Exception("Adding hosts by hostname is not supported.")

        return all_hosts

    def range2ips(self, hosts):
        reworked_hosts = []

        def ips(start_address, end_address):
            try:
                # Python 3.x
                start = int(ip_address(start_address))
                end = int(ip_address(end_address))
            except:
                # Python 2.7
                start = int(ip_address(unicode(start_address)))
                end = int(ip_address(unicode(end_address)))
            return [ip_address(ip).exploded for ip in range(start, end + 1)]

        for host in hosts:
            if '-' in host and not host.startswith('-'):
                start, end = host.strip().split('-')
                try:
                    reworked_hosts.extend(ips(start, end))
                except ValueError:
                    raise Exception("Range of ip_addresses isn't valid")
            else:
                reworked_hosts.append(host)
        return reworked_hosts

    def exists_hostname(self, existing_hosts, hostname):
        return hostname in existing_hosts.keys()

@@ -237,34 +196,16 @@ class KubesprayInventory(object):
        raise ValueError("Unable to find host by IP: {0}".format(ip))

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.yaml_config['all']['children']:
            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                for host in all_hosts.keys():
        for role in self.config.sections():
            for host, _ in self.config.items(role):
                    if host not in hostnames and host not in protected_names:
                        self.debug(
                            "Host {0} removed from role {1}".format(host, role))  # noqa
                        del self.yaml_config['all']['children'][role]['hosts'][host]  # noqa
        # purge from all
        if self.yaml_config['all']['hosts']:
            all_hosts = self.yaml_config['all']['hosts'].copy()
            for host in all_hosts.keys():
                if host not in hostnames and host not in protected_names:
                    self.debug("Host {0} removed from role all".format(host))
                    del self.yaml_config['all']['hosts'][host]
                    self.debug("Host {0} removed from role {1}".format(host,
                                                                       role))
                    self.config.remove_option(role, host)

    def add_host_to_group(self, group, host, opts=""):
        self.debug("adding host {0} to group {1}".format(host, group))
        if group == 'all':
            if self.yaml_config['all']['hosts'] is None:
                self.yaml_config['all']['hosts'] = {host: None}
            self.yaml_config['all']['hosts'][host] = opts
        elif group != 'k8s-cluster:children':
            if self.yaml_config['all']['children'][group]['hosts'] is None:
                self.yaml_config['all']['children'][group]['hosts'] = {
                    host: None}
            else:
                self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa
        self.config.set(group, host, opts)

    def set_kube_master(self, hosts):
        for host in hosts:
@@ -275,16 +216,16 @@ class KubesprayInventory(object):
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
        self.add_host_to_group('k8s-cluster:children', 'kube-node')
        self.add_host_to_group('k8s-cluster:children', 'kube-master')

    def set_calico_rr(self, hosts):
        for host in hosts:
            if host in self.yaml_config['all']['children']['kube-master']:
            if host in self.config.items('kube-master'):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-master group".format(host))
                continue
            if host in self.yaml_config['all']['children']['kube-node']:
            if host in self.config.items('kube-node'):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-node group".format(host))
                continue
@@ -292,14 +233,14 @@ class KubesprayInventory(object):

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
            if len(self.config['all']) >= SCALE_THRESHOLD:
                if self.config.has_option('etcd', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
            if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
                if self.config.has_option('kube-master', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in kube-master "
                               "group.".format(host))
@@ -309,31 +250,42 @@ class KubesprayInventory(object):
    def set_etcd(self, hosts):
        for host in hosts:
            self.add_host_to_group('etcd', host)
            self.add_host_to_group('vault', host)

    def load_file(self, files=None):
        '''Directly loads JSON to inventory.'''
        '''Directly loads JSON, or YAML file to inventory.'''

        if not files:
            raise Exception("No input file specified.")

        import json
        import yaml

        for filename in list(files):
            # Try JSON
            # Try JSON, then YAML
            try:
                with open(filename, 'r') as f:
                    data = json.load(f)
            except ValueError:
                raise Exception("Cannot read %s as JSON, or CSV", filename)
                try:
                    with open(filename, 'r') as f:
                        data = yaml.load(f)
                        print("yaml")
                except ValueError:
                    raise Exception("Cannot read %s as JSON, YAML, or CSV",
                                    filename)

            self.ensure_required_groups(ROLES)
            self.set_k8s_cluster()
            for group, hosts in data.items():
                self.ensure_required_groups([group])
                for host, opts in hosts.items():
                    optstring = {'ansible_host': opts['ip'],
                                 'ip': opts['ip'],
                                 'access_ip': opts['ip']}
                    optstring = "ansible_host={0} ip={0}".format(opts['ip'])
                    for key, val in opts.items():
                        if key == "ip":
                            continue
                        optstring += " {0}={1}".format(key, val)

                    self.add_host_to_group('all', host, optstring)
                    self.add_host_to_group(group, host)
            self.write_config(self.config_file)
@@ -361,26 +313,24 @@ print_ips - Write a space-delimited list of IPs from "all" group

Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1

Configurable env vars:
DEBUG                   Enable debug printing. Default: True
CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.yaml
CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.ini
HOST_PREFIX             Host prefix for generated hosts. Default: node
SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
'''  # noqa
'''
        print(help_text)

    def print_config(self):
        yaml.dump(self.yaml_config, sys.stdout)
        self.config.write(sys.stdout)

    def print_ips(self):
        ips = []
        for host, opts in self.yaml_config['all']['hosts'].items():
        for host, opts in self.config.items('all'):
            ips.append(self.get_ip_from_opts(opts))
        print(' '.join(ips))

@@ -390,6 +340,5 @@ def main(argv=None):
        argv = sys.argv[1:]
    KubesprayInventory(argv, CONFIG_FILE)


if __name__ == "__main__":
    sys.exit(main())
@@ -1,3 +1 @@
configparser>=3.3.0
ruamel.yaml>=0.15.88
ipaddress
@@ -34,9 +34,7 @@ class TestInventory(unittest.TestCase):
|
||||
self.inv = inventory.KubesprayInventory()
|
||||
|
||||
def test_get_ip_from_opts(self):
|
||||
optstring = {'ansible_host': '10.90.3.2',
|
||||
'ip': '10.90.3.2',
|
||||
'access_ip': '10.90.3.2'}
|
||||
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||
expected = "10.90.3.2"
|
||||
result = self.inv.get_ip_from_opts(optstring)
|
||||
self.assertEqual(expected, result)
|
||||
@@ -50,7 +48,7 @@ class TestInventory(unittest.TestCase):
|
||||
groups = ['group1', 'group2']
|
||||
self.inv.ensure_required_groups(groups)
|
||||
for group in groups:
|
||||
self.assertTrue(group in self.inv.yaml_config['all']['children'])
|
||||
self.assertTrue(group in self.inv.config.sections())
|
||||
|
||||
def test_get_host_id(self):
|
||||
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
|
||||
@@ -69,49 +67,35 @@ class TestInventory(unittest.TestCase):
|
||||
def test_build_hostnames_add_one(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'})])
|
||||
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_duplicate(self):
|
||||
changed_hosts = ['10.90.0.2']
|
||||
expected = OrderedDict([('node1',
|
||||
{'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'})])
|
||||
self.inv.yaml_config['all']['hosts'] = expected
|
||||
'ansible_host=10.90.0.2 ip=10.90.0.2')])
|
||||
self.inv.config['all'] = expected
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_add_two(self):
|
||||
changed_hosts = ['10.90.0.2', '10.90.0.3']
|
||||
expected = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
self.inv.yaml_config['all']['hosts'] = OrderedDict()
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.config['all'] = OrderedDict()
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_build_hostnames_delete_first(self):
|
||||
changed_hosts = ['-10.90.0.2']
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing_hosts
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.config['all'] = existing_hosts
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.build_hostnames(changed_hosts)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
@@ -119,12 +103,8 @@ class TestInventory(unittest.TestCase):
|
||||
hostname = 'node1'
|
||||
expected = True
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
@@ -132,12 +112,8 @@ class TestInventory(unittest.TestCase):
|
||||
hostname = 'node99'
|
||||
expected = False
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_hostname(existing_hosts, hostname)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
@@ -145,12 +121,8 @@ class TestInventory(unittest.TestCase):
|
||||
ip = '10.90.0.2'
|
||||
expected = True
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
@@ -158,40 +130,26 @@ class TestInventory(unittest.TestCase):
|
||||
ip = '10.90.0.200'
|
||||
expected = False
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
result = self.inv.exists_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_delete_host_by_ip_positive(self):
|
||||
ip = '10.90.0.2'
|
||||
expected = OrderedDict([
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.inv.delete_host_by_ip(existing_hosts, ip)
|
||||
self.assertEqual(expected, existing_hosts)
|
||||
|
||||
def test_delete_host_by_ip_negative(self):
|
||||
ip = '10.90.0.200'
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'})])
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
|
||||
self.assertRaisesRegexp(ValueError, "Unable to find host",
|
||||
self.inv.delete_host_by_ip, existing_hosts, ip)
|
||||
|
||||
@@ -199,71 +157,59 @@ class TestInventory(unittest.TestCase):
|
||||
proper_hostnames = ['node1', 'node2']
|
||||
bad_host = 'doesnotbelong2'
|
||||
existing_hosts = OrderedDict([
|
||||
('node1', {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'}),
|
||||
('node2', {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'}),
|
||||
('doesnotbelong2', {'whateveropts=ilike'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing_hosts
|
||||
('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
|
||||
('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3'),
|
||||
            ('doesnotbelong2', 'whateveropts=ilike')])
        self.inv.config['all'] = existing_hosts
        self.inv.purge_invalid_hosts(proper_hostnames)
        self.assertTrue(
            bad_host not in self.inv.yaml_config['all']['hosts'].keys())
        self.assertTrue(bad_host not in self.inv.config['all'].keys())

    def test_add_host_to_group(self):
        group = 'etcd'
        host = 'node1'
        opts = {'ip': '10.90.0.2'}
        opts = 'ip=10.90.0.2'

        self.inv.add_host_to_group(group, host, opts)
        self.assertEqual(
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)
        self.assertEqual(self.inv.config[group].get(host), opts)

    def test_set_kube_master(self):
        group = 'kube-master'
        host = 'node1'

        self.inv.set_kube_master([host])
        self.assertTrue(
            host in self.inv.yaml_config['all']['children'][group]['hosts'])
        self.assertTrue(host in self.inv.config[group])

    def test_set_all(self):
        group = 'all'
        hosts = OrderedDict([
            ('node1', 'opt1'),
            ('node2', 'opt2')])

        self.inv.set_all(hosts)
        for host, opt in hosts.items():
            self.assertEqual(
                self.inv.yaml_config['all']['hosts'].get(host), opt)
            self.assertEqual(self.inv.config[group].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s-cluster'
        group = 'k8s-cluster:children'
        expected_hosts = ['kube-node', 'kube-master']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
            self.assertTrue(
                host in
                self.inv.yaml_config['all']['children'][group]['children'])
            self.assertTrue(host in self.inv.config[group])

    def test_set_kube_node(self):
        group = 'kube-node'
        host = 'node1'

        self.inv.set_kube_node([host])
        self.assertTrue(
            host in self.inv.yaml_config['all']['children'][group]['hosts'])
        self.assertTrue(host in self.inv.config[group])

    def test_set_etcd(self):
        group = 'etcd'
        host = 'node1'

        self.inv.set_etcd([host])
        self.assertTrue(
            host in self.inv.yaml_config['all']['children'][group]['hosts'])
        self.assertTrue(host in self.inv.config[group])

    def test_scale_scenario_one(self):
        num_nodes = 50
@@ -273,13 +219,11 @@ class TestInventory(unittest.TestCase):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_master(list(hosts.keys())[0:2])
        self.inv.set_etcd(hosts.keys()[0:3])
        self.inv.set_kube_master(hosts.keys()[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])

    def test_scale_scenario_two(self):
        num_nodes = 500
@@ -289,57 +233,8 @@
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_master(list(hosts.keys())[3:5])
        self.inv.set_etcd(hosts.keys()[0:3])
        self.inv.set_kube_master(hosts.keys()[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
        expected = ['10.90.0.2',
                    '10.90.0.4',
                    '10.90.0.5',
                    '10.90.0.6',
                    '10.90.0.8']
        result = self.inv.range2ips(changed_hosts)
        self.assertEqual(expected, result)

    def test_range2ips_incorrect_range(self):
        host_range = ['10.90.0.4-a.9b.c.e']
        self.assertRaisesRegexp(Exception, "Range of ip_addresses isn't valid",
                                self.inv.range2ips, host_range)
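
For context, here is a minimal sketch of the behaviour the two `range2ips` tests above pin down. It is an illustration only — the helper name and error text are taken from the test expectations, and the real implementation in `inventory.py` may differ:

```python
# Hedged sketch: expand "a.b.c.d-a.b.c.e" entries into individual IPs and
# pass plain entries through unchanged; invalid range bounds raise the
# error the test asserts on. Not the actual inventory.py code.
import ipaddress

def range2ips(hosts):
    ips = []
    for host in hosts:
        if '-' not in host:
            ips.append(host)
            continue
        start, end = host.split('-')
        try:
            first = int(ipaddress.ip_address(start.strip()))
            last = int(ipaddress.ip_address(end.strip()))
        except ValueError:
            raise Exception("Range of ip_addresses isn't valid")
        ips.extend(str(ipaddress.ip_address(ip)) for ip in range(first, last + 1))
    return ips

# range2ips(['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8'])
# -> ['10.90.0.2', '10.90.0.4', '10.90.0.5', '10.90.0.6', '10.90.0.8']
```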

    def test_build_hostnames_different_ips_add_one(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_two(self):
        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)
            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
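
Similarly, a hypothetical sketch of the naming rule the `build_hostnames` tests exercise: each `<ip>,<access_ip>` entry becomes `nodeN` with `ansible_host`/`ip`/`access_ip` options, and an already-known IP keeps its existing hostname. The real method works against `self.yaml_config`; the `existing` parameter here is a simplification:

```python
from collections import OrderedDict

def build_hostnames(changed_hosts, existing=None):
    # Hypothetical stand-in for the state the real method reads from
    # self.yaml_config['all']['hosts'].
    result = OrderedDict(existing or {})
    next_id = len(result) + 1
    for entry in changed_hosts:
        ip, _, access_ip = entry.partition(',')
        access_ip = access_ip or ip
        # Reuse the existing hostname when this IP was already added,
        # so re-running the builder stays idempotent (the "duplicate" test).
        name = next((n for n, o in result.items() if o.get('ip') == ip), None)
        if name is None:
            name = 'node%d' % next_id
            next_id += 1
        result[name] = {'ansible_host': access_ip,
                        'ip': ip,
                        'access_ip': access_ip}
    return result
```
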
@@ -6,5 +6,5 @@ This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer

## Install
```
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml
```

@@ -1 +0,0 @@
../../library
@@ -5,4 +5,3 @@ metallb:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
  version: v0.7.3

@@ -12,7 +12,6 @@
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  become: true
  with_items: "{{ rendering.results }}"
  when:
    - "inventory_hostname == groups['kube-master'][0]"

@@ -53,6 +53,22 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  resourceNames: ["metallb-speaker"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: config-watcher
@@ -115,6 +131,21 @@ roleRef:
  kind: Role
  name: config-watcher
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election
---
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
@@ -142,7 +173,7 @@ spec:
      hostNetwork: true
      containers:
      - name: speaker
        image: metallb/speaker:{{ metallb.version }}
        image: metallb/speaker:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
@@ -199,7 +230,7 @@ spec:
        runAsUser: 65534 # nobody
      containers:
      - name: controller
        image: metallb/controller:{{ metallb.version }}
        image: metallb/controller:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
@@ -219,3 +250,5 @@ spec:
          readOnlyRootFilesystem: true

---

@@ -22,3 +22,4 @@
- hosts: kube-master[0]
  roles:
    - { role: kubernetes-pv }

@@ -79,3 +79,4 @@
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

@@ -1,3 +1,2 @@
---
dependencies:
  - {role: kubernetes-pv/ansible, tags: apps}

@@ -1,4 +1,3 @@
---
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
  register: "initial_heketi_state"

@@ -38,4 +38,4 @@
  vars: { volume: "{{ volume_information.stdout|from_json }}" }
  when: "volume.name == 'heketidbstorage'"
- name: "Ensure heketi database volume exists."
  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
  assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }

@@ -8,9 +8,7 @@
- register: "clusterrolebinding_state"
  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false
- assert:
    that: "clusterrolebinding_state.stdout != \"\""
    msg: "Cluster role binding is not present."
- assert: { that: "clusterrolebinding_state.stdout != \"\"", message: "Cluster role binding is not present." }

- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
@@ -26,6 +24,4 @@
- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false
- assert:
    that: "secret_state.stdout != \"\""
    msg: "Heketi config secret is not present."
- assert: { that: "secret_state.stdout != \"\"", message: "Heketi config secret is not present." }

@@ -1,5 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - holmsten
  - miouge1
@@ -20,28 +20,31 @@ module "aws-vpc" {

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
  aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
  default_tags = "${var.default_tags}"
  aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
  aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
  aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
  default_tags="${var.default_tags}"

}

module "aws-elb" {
  source = "modules/elb"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
  aws_cluster_name="${var.aws_cluster_name}"
  aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
  aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
  aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
  aws_elb_api_port = "${var.aws_elb_api_port}"
  k8s_secure_api_port = "${var.k8s_secure_api_port}"
  default_tags = "${var.default_tags}"
  default_tags="${var.default_tags}"

}

module "aws-iam" {
  source = "modules/iam"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_cluster_name="${var.aws_cluster_name}"
}

/*
@@ -57,7 +60,8 @@ resource "aws_instance" "bastion-server" {
  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"

  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

@@ -68,6 +72,7 @@ resource "aws_instance" "bastion-server" {
  ))}"
}

/*
* Create K8s Master and worker nodes and etcd instances
*
@@ -79,14 +84,18 @@ resource "aws_instance" "k8s-master" {

  count = "${var.aws_kube_master_num}"

  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-master-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags = "${merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
@@ -100,16 +109,19 @@ resource "aws_elb_attachment" "attach_master_nodes" {
  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}

resource "aws_instance" "k8s-etcd" {
  ami = "${data.aws_ami.distro.id}"
  instance_type = "${var.aws_etcd_size}"

  count = "${var.aws_etcd_num}"

  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

@@ -118,8 +130,10 @@ resource "aws_instance" "k8s-etcd" {
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "etcd"
  ))}"

}

resource "aws_instance" "k8s-worker" {
  ami = "${data.aws_ami.distro.id}"
  instance_type = "${var.aws_kube_worker_size}"
@@ -129,18 +143,22 @@ resource "aws_instance" "k8s-worker" {
  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags = "${merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
    "Role", "worker"
  ))}"

}

/*
* Create Kubespray Inventory File
*
@@ -158,6 +176,7 @@ data "template_file" "inventory" {
    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
  }

}

resource "null_resource" "inventories" {
@@ -168,4 +187,5 @@ resource "null_resource" "inventories" {
  triggers {
    template = "${data.template_file.inventory.rendered}"
  }

}

@@ -7,6 +7,7 @@ resource "aws_security_group" "aws-elb" {
  ))}"
}

resource "aws_security_group_rule" "aws-allow-api-access" {
  type = "ingress"
  from_port = "${var.aws_elb_api_port}"

@@ -14,11 +14,14 @@ variable "k8s_secure_api_port" {
  description = "Secure Port of K8S API Server"
}

variable "aws_avail_zones" {
  description = "Availability Zones Used"
  type = "list"
}

variable "aws_subnet_ids_public" {
  description = "IDs of Public Subnets"
  type = "list"

@@ -2,7 +2,6 @@

resource "aws_iam_role" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
@@ -21,7 +20,6 @@ EOF

resource "aws_iam_role" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
@@ -43,7 +41,6 @@ EOF
resource "aws_iam_role_policy" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  role = "${aws_iam_role.kube-master.id}"

  policy = <<EOF
{
  "Version": "2012-10-17",
@@ -78,7 +75,6 @@ EOF
resource "aws_iam_role_policy" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"
  role = "${aws_iam_role.kube-worker.id}"

  policy = <<EOF
{
  "Version": "2012-10-17",
@@ -128,6 +124,7 @@ resource "aws_iam_role_policy" "kube-worker" {
EOF
}

#Create AWS Instance Profiles

resource "aws_iam_instance_profile" "kube-master" {

@@ -1,3 +1,4 @@

resource "aws_vpc" "cluster-vpc" {
  cidr_block = "${var.aws_vpc_cidr_block}"

@@ -10,14 +11,17 @@ resource "aws_vpc" "cluster-vpc" {
  ))}"
}

resource "aws_eip" "cluster-nat-eip" {
  count = "${length(var.aws_cidr_subnets_public)}"
  vpc = true
}

resource "aws_internet_gateway" "cluster-vpc-internetgw" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  tags = "${merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
  ))}"
@@ -25,7 +29,7 @@ resource "aws_internet_gateway" "cluster-vpc-internetgw" {

resource "aws_subnet" "cluster-vpc-subnets-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  count="${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"

@@ -39,11 +43,12 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
  count = "${length(var.aws_cidr_subnets_public)}"
  allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"

}

resource "aws_subnet" "cluster-vpc-subnets-private" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  count="${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"

@@ -58,7 +63,6 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {

resource "aws_route_table" "kubernetes-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
@@ -72,7 +76,6 @@ resource "aws_route_table" "kubernetes-public" {
resource "aws_route_table" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  route {
    cidr_block = "0.0.0.0/0"
    nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
@@ -81,20 +84,24 @@ resource "aws_route_table" "kubernetes-private" {
  tags = "${merge(var.default_tags, map(
    "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
  ))}"

}

resource "aws_route_table_association" "kubernetes-public" {
  count = "${length(var.aws_cidr_subnets_public)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
  route_table_id = "${aws_route_table.kubernetes-public.id}"

}

resource "aws_route_table_association" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
  route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"

}

#Kubernetes Security Groups

resource "aws_security_group" "kubernetes" {
@@ -111,7 +118,7 @@ resource "aws_security_group_rule" "allow-all-ingress" {
  from_port = 0
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["${var.aws_vpc_cidr_block}"]
  cidr_blocks= ["${var.aws_vpc_cidr_block}"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}

@@ -124,6 +131,7 @@ resource "aws_security_group_rule" "allow-all-egress" {
  security_group_id = "${aws_security_group.kubernetes.id}"
}

resource "aws_security_group_rule" "allow-ssh-connections" {
  type = "ingress"
  from_port = 22

@@ -12,8 +12,10 @@ output "aws_subnet_ids_public" {

output "aws_security_group" {
  value = ["${aws_security_group.kubernetes.*.id}"]

}

output "default_tags" {
  value = "${var.default_tags}"

}

@@ -2,10 +2,12 @@ variable "aws_vpc_cidr_block" {
  description = "CIDR Blocks for AWS VPC"
}

variable "aws_cluster_name" {
  description = "Name of Cluster"
}

variable "aws_avail_zones" {
  description = "AWS Availability Zones Used"
  type = "list"

@@ -14,6 +14,7 @@ output "etcd" {
  value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}

output "aws_elb_api_fqdn" {
  value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}

@@ -10,7 +10,7 @@ most modern installs of OpenStack that support the basic services.

### Known compatible public clouds
- [Auro](https://auro.io/)
- [Betacloud](https://www.betacloud.io/)
- [BetaCloud](https://www.betacloud.io/)
- [CityCloud](https://www.citycloud.com/)
- [DreamHost](https://www.dreamhost.com/cloud/computing/)
- [ELASTX](https://elastx.se/)
@@ -109,7 +109,6 @@ Create an inventory directory for your cluster by copying the existing sample an
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
$ cd inventory/$CLUSTER
$ ln -s ../../contrib/terraform/openstack/hosts
$ ln -s ../../contrib
```

This will be the base for subsequent Terraform commands.
@@ -229,7 +228,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `nova flavor-list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
@@ -359,7 +358,7 @@ If it fails try to connect manually via SSH. It could be something as simple as

### Configure cluster variables

Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
Edit `inventory/$CLUSTER/group_vars/all.yml`:
- **bin_dir**:
```
# Directory where the binaries will be installed
@@ -372,7 +371,7 @@ bin_dir: /opt/bin
```
cloud_provider: openstack
```
Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
- Set variable **kube_network_plugin** to your desired networking plugin.
  - **flannel** works out-of-the-box
  - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
@@ -416,8 +415,8 @@ ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get `admin`'s certificates and keys:
```
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Configure kubectl:

@@ -20,12 +20,11 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_master" {

resource "openstack_networking_secgroup_v2" "bastion" {
  name = "${var.cluster_name}-bastion"
  count = "${var.number_of_bastions ? 1 : 0}"
  description = "${var.cluster_name} - Bastion Server"
}

resource "openstack_networking_secgroup_rule_v2" "bastion" {
  count = "${var.number_of_bastions ? length(var.bastion_allowed_remote_ips) : 0}"
  count = "${length(var.bastion_allowed_remote_ips)}"
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
@@ -88,6 +87,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
  }

}

resource "openstack_compute_instance_v2" "k8s_master" {
@@ -102,23 +102,22 @@ resource "openstack_compute_instance_v2" "k8s_master" {
    name = "${var.network_name}"
  }

  # The join() hack is described here: https://github.com/hashicorp/terraform/issues/11566
  # As a workaround for creating "dynamic" lists (when, for example, no bastion host is created)

  security_groups = ["${compact(list(
    openstack_networking_secgroup_v2.k8s_master.name,
    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
    openstack_networking_secgroup_v2.k8s.name,
  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.bastion.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
    "default",
  ))}"]
  ]

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
  }

  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
  }

}

resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
@@ -133,11 +132,10 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
    name = "${var.network_name}"
  }

  security_groups = ["${compact(list(
    openstack_networking_secgroup_v2.k8s_master.name,
    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
    openstack_networking_secgroup_v2.k8s.name,
  ))}"]
  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
    "${openstack_networking_secgroup_v2.bastion.name}",
    "${openstack_networking_secgroup_v2.k8s.name}",
  ]

  metadata = {
    ssh_user = "${var.ssh_user}"
@@ -148,6 +146,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
  }

}

resource "openstack_compute_instance_v2" "etcd" {
@@ -169,6 +168,7 @@ resource "openstack_compute_instance_v2" "etcd" {
    kubespray_groups = "etcd,vault,no-floating"
    depends_on = "${var.network_id}"
  }

}

resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
@@ -193,6 +193,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
  }

}

resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
@@ -216,6 +217,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
  }

}

resource "openstack_compute_instance_v2" "k8s_node" {
@@ -230,12 +232,11 @@ resource "openstack_compute_instance_v2" "k8s_node" {
    name = "${var.network_name}"
  }

  security_groups = ["${compact(list(
    openstack_networking_secgroup_v2.k8s_master.name,
    join(" ", openstack_networking_secgroup_v2.bastion.*.id),
    openstack_networking_secgroup_v2.k8s.name,
  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
    "${openstack_networking_secgroup_v2.bastion.name}",
    "${openstack_networking_secgroup_v2.worker.name}",
    "default",
  ))}"]
  ]

  metadata = {
    ssh_user = "${var.ssh_user}"
@@ -246,6 +247,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
  }

}

resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
@@ -270,6 +272,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
  }

}

resource "openstack_compute_floatingip_associate_v2" "bastion" {
@@ -318,6 +321,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
  }

}

resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {

@@ -4,6 +4,7 @@ output "router_id" {

output "router_internal_port_id" {
  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, list("")), 0)}"

}

output "subnet_id" {

@@ -6,13 +6,11 @@ public_key_path = "~/.ssh/id_rsa.pub"

# image to use for bastion, masters, standalone etcd instances, and nodes
image = "<image name>"

# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
ssh_user = "<cloud-provisioned user>"

# 0|1 bastion nodes
number_of_bastions = 0

#flavor_bastion = "<UUID>"

# standalone etcds
@@ -20,20 +18,14 @@ number_of_etcd = 0

# masters
number_of_k8s_masters = 1

number_of_k8s_masters_no_etcd = 0

number_of_k8s_masters_no_floating_ip = 0

number_of_k8s_masters_no_floating_ip_no_etcd = 0

flavor_k8s_master = "<UUID>"

# nodes
number_of_k8s_nodes = 2

number_of_k8s_nodes_no_floating_ip = 4

#flavor_k8s_node = "<UUID>"

# GlusterFS
@@ -48,11 +40,7 @@ number_of_k8s_nodes_no_floating_ip = 4

# networking
network_name = "<network>"

external_net = "<UUID>"

subnet_cidr = "<cidr>"

floatingip_pool = "<pool>"

bastion_allowed_remote_ips = ["0.0.0.0/0"]

@@ -74,27 +74,27 @@ variable "ssh_user_gfs" {
}

variable "flavor_bastion" {
  description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs"
  description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
  default = 3
}

variable "flavor_k8s_master" {
  description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs"
  description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
  default = 3
}

variable "flavor_k8s_node" {
  description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs"
  description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
  default = 3
}

variable "flavor_etcd" {
  description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs"
  description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
  default = 3
}

variable "flavor_gfs_node" {
  description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs"
  description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
  default = 3
}

@@ -147,13 +147,12 @@ variable "bastion_allowed_remote_ips" {

variable "worker_allowed_ports" {
  type = "list"

  default = [
    {
      "protocol" = "tcp"
      "port_range_min" = 30000
      "port_range_max" = 32767
      "remote_ip_prefix" = "0.0.0.0/0"
    },
    }
  ]
}

@@ -1,231 +0,0 @@
# Kubernetes on Packet with Terraform

Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
[Packet](https://www.packet.com).

## Status

This will install a Kubernetes cluster on Packet bare metal. It should work in all locations and on most server types.

## Approach
The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your Packet project.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by [cluster.yml](../../../cluster.yml)
to actually install Kubernetes with Kubespray.

### Kubernetes Nodes
You can create many different kubernetes topologies by setting the number of
different classes of hosts.
- Master nodes with etcd: `number_of_k8s_masters` variable
- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable
- Standalone etcd hosts: `number_of_etcd` variable
- Kubernetes worker nodes: `number_of_k8s_nodes` variable

Note that the Ansible script will report an invalid configuration if you wind up
with an *even number* of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.
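
The parity rule above boils down to requiring an odd, non-zero total of etcd replicas for quorum. A standalone illustration (Kubespray's actual validation lives in its Ansible preflight checks, not in this file):

```python
# Illustrative only: even etcd member counts cannot form a stable quorum.
def etcd_count_ok(masters_with_etcd, standalone_etcd):
    total = masters_with_etcd + standalone_etcd
    return total > 0 and total % 2 == 1

assert etcd_count_ok(3, 0)          # three replicas: fine
assert not etcd_count_ok(3, 3)      # six replicas, as in the example above
```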

## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Install dependencies: `sudo pip install -r requirements.txt`
- Account with Packet Host
- An SSH key pair

## SSH Key Setup

An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tf will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tf to blank to prevent the duplicate key from being uploaded, which would cause an error.

If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:

```ShellSession
ssh-keygen -f ~/.ssh/id_rsa
```

## Terraform
Terraform will be used to provision all of the Packet resources with base software as appropriate.

### Configuration

#### Inventory files

Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

```ShellSession
$ cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
$ cd inventory/$CLUSTER
$ ln -s ../../contrib/terraform/packet/hosts
```

This will be the base for subsequent Terraform commands.

#### Packet API access

Your Packet API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
This key is typically stored outside of the code repo since it is considered secret.
If someone gets this key, they can start up/shut down hosts in your project!

For more information on how to generate an API key or find your project ID, please see:
https://support.packet.com/kb/articles/api-integrations

The Packet Project ID associated with the key will be set later in cluster.tf.

For more information about the API, please see:
https://www.packet.com/developers/api/

Example:
```ShellSession
$ export PACKET_AUTH_TOKEN="Example-API-Token"
```

Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).

#### Cluster variables
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

For your cluster, edit `inventory/$CLUSTER/cluster.tf`.

The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster.

While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

* cluster_name = the name of the inventory directory created above as $CLUSTER
* packet_project_id = the Packet Project ID associated with the Packet API token above

#### Enable localhost access
Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
`kubeconfig_localhost: true` in the Kubespray configuration.

Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
`# kubeconfig_localhost: false`
becomes:
`kubeconfig_localhost: true`

Once the Kubespray playbooks are run, a Kubernetes configuration file will be written to the local host at `inventory/$CLUSTER/artifacts/admin.conf`.

#### Terraform state files

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually); to prevent you from pushing them accidentally, they are listed in a
`.gitignore` file in the `terraform/packet` directory:

* `.terraform`
* `.tfvars`
* `.tfstate`
* `.tfstate.backup`

You can still add them manually if you want to.

### Initialization

Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:

```ShellSession
$ cd inventory/$CLUSTER
$ terraform init ../../contrib/terraform/packet
```

This should finish fairly quickly, telling you Terraform has successfully initialized and loaded the necessary modules.

### Provisioning cluster
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
$ export ANSIBLE_HOST_KEY_CHECKING=False
$ ansible-playbook -i hosts ../../cluster.yml
```

### Destroying cluster
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/packet
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`

### Debugging
You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.

## Ansible

### Node access

#### SSH

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).

#### Test access

Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.

```
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-etcd-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

If it fails try to connect manually via SSH. It could be something as simple as a stale host key.

### Deploy Kubernetes

```
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```

This will take some time as there are many tasks to run.

## Kubernetes

### Set up kubectl

* [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.

* Verify that Kubectl runs correctly
```
kubectl version
```

* Verify that the Kubernetes configuration file has been copied over
```
cat inventory/$CLUSTER/artifacts/admin.conf
```

* Verify that all the nodes are running correctly.
```
kubectl version
kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes
```

## What's next

Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
@@ -1 +0,0 @@
../terraform.py
@@ -1,60 +0,0 @@
# Configure the Packet Provider
provider "packet" {}

resource "packet_ssh_key" "k8s" {
  count = "${var.public_key_path != "" ? 1 : 0}"
  name = "kubernetes-${var.cluster_name}"
  public_key = "${chomp(file(var.public_key_path))}"
}

resource "packet_device" "k8s_master" {
  depends_on = ["packet_ssh_key.k8s"]

  count = "${var.number_of_k8s_masters}"
  hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
  plan = "${var.plan_k8s_masters}"
  facility = "${var.facility}"
  operating_system = "${var.operating_system}"
  billing_cycle = "${var.billing_cycle}"
  project_id = "${var.packet_project_id}"
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
}

resource "packet_device" "k8s_master_no_etcd" {
  depends_on = ["packet_ssh_key.k8s"]

  count = "${var.number_of_k8s_masters_no_etcd}"
  hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
  plan = "${var.plan_k8s_masters_no_etcd}"
  facility = "${var.facility}"
  operating_system = "${var.operating_system}"
  billing_cycle = "${var.billing_cycle}"
  project_id = "${var.packet_project_id}"
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
}

resource "packet_device" "k8s_etcd" {
  depends_on = ["packet_ssh_key.k8s"]

  count = "${var.number_of_etcd}"
  hostname = "${var.cluster_name}-etcd-${count.index+1}"
  plan = "${var.plan_etcd}"
  facility = "${var.facility}"
  operating_system = "${var.operating_system}"
  billing_cycle = "${var.billing_cycle}"
  project_id = "${var.packet_project_id}"
  tags = ["cluster-${var.cluster_name}", "etcd"]
}

resource "packet_device" "k8s_node" {
  depends_on = ["packet_ssh_key.k8s"]

  count = "${var.number_of_k8s_nodes}"
  hostname = "${var.cluster_name}-k8s-node-${count.index+1}"
  plan = "${var.plan_k8s_nodes}"
  facility = "${var.facility}"
  operating_system = "${var.operating_system}"
  billing_cycle = "${var.billing_cycle}"
  project_id = "${var.packet_project_id}"
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
}
@@ -1,15 +0,0 @@
output "k8s_masters" {
  value = "${packet_device.k8s_master.*.access_public_ipv4}"
}

output "k8s_masters_no_etc" {
  value = "${packet_device.k8s_master_no_etcd.*.access_public_ipv4}"
}

output "k8s_etcds" {
  value = "${packet_device.k8s_etcd.*.access_public_ipv4}"
}

output "k8s_nodes" {
  value = "${packet_device.k8s_node.*.access_public_ipv4}"
}
@@ -1,32 +0,0 @@
# your Kubernetes cluster name here
cluster_name = "mycluster"

# Your Packet project ID. See https://support.packet.com/kb/articles/api-integrations
packet_project_id = "Example-API-Token"

# The public SSH key to be uploaded into authorized_keys in bare metal Packet nodes provisioned
# leave this value blank if the public key is already setup in the Packet project
# Terraform will complain if the public key is setup in Packet
public_key_path = "~/.ssh/id_rsa.pub"

# cluster location
facility = "ewr1"

# standalone etcds
number_of_etcd = 0

plan_etcd = "t1.small.x86"

# masters
number_of_k8s_masters = 1

number_of_k8s_masters_no_etcd = 0

plan_k8s_masters = "t1.small.x86"

plan_k8s_masters_no_etcd = "t1.small.x86"

# nodes
number_of_k8s_nodes = 2

plan_k8s_nodes = "t1.small.x86"
@@ -1 +0,0 @@
../../../../inventory/sample/group_vars
@@ -1,56 +0,0 @@
variable "cluster_name" {
  default = "kubespray"
}

variable "packet_project_id" {
  description = "Your Packet project ID. See https://support.packet.com/kb/articles/api-integrations"
}

variable "operating_system" {
  default = "ubuntu_16_04"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default = "~/.ssh/id_rsa.pub"
}

variable "billing_cycle" {
  default = "hourly"
}

variable "facility" {
  default = "dfw2"
}

variable "plan_k8s_masters" {
  default = "c2.medium.x86"
}

variable "plan_k8s_masters_no_etcd" {
  default = "c2.medium.x86"
}

variable "plan_etcd" {
  default = "c2.medium.x86"
}

variable "plan_k8s_nodes" {
  default = "c2.medium.x86"
}

variable "number_of_k8s_masters" {
  default = 0
}

variable "number_of_k8s_masters_no_etcd" {
  default = 0
}

variable "number_of_etcd" {
  default = 0
}

variable "number_of_k8s_nodes" {
  default = 0
}
@@ -218,47 +218,6 @@ def triton_machine(resource, module_name):
    return name, attrs, groups


@parses('packet_device')
def packet_device(resource, tfvars=None):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['hostname']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'facility': raw_attrs['facility'],
        'hostname': raw_attrs['hostname'],
        'operating_system': raw_attrs['operating_system'],
        'locked': parse_bool(raw_attrs['locked']),
        'tags': parse_list(raw_attrs, 'tags'),
        'plan': raw_attrs['plan'],
        'project_id': raw_attrs['project_id'],
        'state': raw_attrs['state'],
        # ansible
        'ansible_ssh_host': raw_attrs['network.0.address'],
        'ansible_ssh_user': 'root',  # it's always "root" on Packet
        # generic
        'ipv4_address': raw_attrs['network.0.address'],
        'public_ipv4': raw_attrs['network.0.address'],
        'ipv6_address': raw_attrs['network.1.address'],
        'public_ipv6': raw_attrs['network.1.address'],
        'private_ipv4': raw_attrs['network.2.address'],
        'provider': 'packet',
    }

    # add groups based on attrs
    groups.append('packet_facility=' + attrs['facility'])
    groups.append('packet_operating_system=' + attrs['operating_system'])
    groups.append('packet_locked=%s' % attrs['locked'])
    groups.append('packet_state=' + attrs['state'])
    groups.append('packet_plan=' + attrs['plan'])

    # groups specific to kubespray
    groups = groups + attrs['tags']

    return name, attrs, groups
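
As an aside, the `@parses('packet_device')` decorator above relies on a small registry that maps Terraform resource types to parser functions. A hypothetical sketch of that dispatch pattern (the actual `parses` helper lives elsewhere in terraform.py and may be implemented differently):

```python
# Registry sketch: resource type -> parser turning one .tfstate resource
# into an Ansible inventory (name, attrs, groups) triple.
PARSERS = {}

def parses(resource_type):
    def register(parser):
        PARSERS[resource_type] = parser
        return parser
    return register

def parse_resource(resource, tfvars=None):
    # e.g. a resource with type 'packet_device' selects packet_device() above
    parser = PARSERS.get(resource.get('type'))
    return parser(resource, tfvars) if parser else None
```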


@parses('digitalocean_droplet')
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):

@@ -1,4 +1,3 @@
---
vault_deployment_type: docker
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
vault_version: 0.10.1

@@ -1,39 +0,0 @@
* [Readme](/)
* [Comparisons](/docs/comparisons.md)
* [Getting started](/docs/getting-started.md)
* [Ansible](docs/ansible.md)
* [Variables](/docs/vars.md)
* [Ansible](/docs/ansible.md)
* Operations
  * [Integration](docs/integration.md)
  * [Upgrades](/docs/upgrades.md)
  * [HA Mode](docs/ha-mode.md)
  * [Large deployments](docs/large-deployments.md)
* CNI
  * [Calico](docs/calico.md)
  * [Contiv](docs/contiv.md)
  * [Flannel](docs/flannel.md)
  * [Kube Router](docs/kube-router.md)
  * [Weave](docs/weave.md)
  * [Multus](docs/multus.md)
* [Cloud providers](docs/cloud.md)
  * [AWS](docs/aws.md)
  * [Azure](docs/azure.md)
  * [OpenStack](/docs/openstack.md)
  * [vSphere](/docs/vsphere.md)
* Operating Systems
  * [Atomic](docs/atomic.md)
  * [Debian](docs/debian.md)
  * [Coreos](docs/coreos.md)
  * [OpenSUSE](docs/opensuse.md)
* Advanced
  * [Proxy](/docs/proxy.md)
  * [Downloads](docs/downloads.md)
  * [CRI-O](docs/cri-o.md)
  * [Netcheck](docs/netcheck.md)
  * [DNS Stack](docs/dns-stack.md)
  * [Kubernetes reliability](docs/kubernetes-reliability.md)
* Developers
  * [Test cases](docs/test_cases.md)
  * [Vagrant](docs/vagrant.md)
* [Roadmap](docs/roadmap.md)
@@ -110,6 +110,7 @@ The following tags are defined in playbooks:
| calico | Network plugin Calico
| canal | Network plugin Canal
| cloud-provider | Cloud-provider related tasks
| dnsmasq | Configuring DNS stack for hosts and K8s apps
| docker | Configuring docker for hosts
| download | Fetching container images to a delegate host
| etcd | Configuring etcd cluster
@@ -151,11 +152,11 @@ Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers:

```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
```
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
```
ansible-playbook -i inventory/sample/hosts.ini -e dns_mode='none' cluster.yml --tags resolvconf
ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
```
And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload container to K8s cluster nodes:

@@ -67,15 +67,6 @@ To re-define you need to edit the inventory and add a group variable `calico_net
calico_network_backend: none
```

##### Optional : Define the default pool CIDR

By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool.
In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`); it starts with the default IP Pool, whose IP range CIDR can be defined in group_vars (k8s-cluster/k8s-net-calico.yml):

```
calico_pool_cidr: 10.233.64.0/20
```

##### Optional : BGP Peering with border routers

In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
@@ -95,12 +86,6 @@ In order to define global peers, the `peers` variable can be defined in group_va
In order to define peers on a per node basis, the `peers` variable must be defined in hostvars.
NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers).

Since Calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
This can be enabled by setting the following variable as follows in group_vars (k8s-cluster/k8s-net-calico.yml):
```
calico_advertise_cluster_ips: true
```

##### Optional : Define global AS number

Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).

@@ -1,10 +1,10 @@
# ---
# peers:
#   - router_id: "10.99.0.34"
#---
#peers:
#  -router_id: "10.99.0.34"
#    as: "65xxx"
#  - router_id: "10.99.0.35"
#    as: "65xxx"

# loadbalancer_apiserver:
#
#loadbalancer_apiserver:
#  address: "10.99.0.44"
#  port: "8383"

@@ -1,10 +1,10 @@
# ---
# peers:
#   - router_id: "10.99.0.2"
#---
#peers:
#  -router_id: "10.99.0.2"
#    as: "65xxx"
#  - router_id: "10.99.0.3"
#    as: "65xxx"

# loadbalancer_apiserver:
#
#loadbalancer_apiserver:
#  address: "10.99.0.21"
#  port: "8383"

@@ -19,9 +19,7 @@ on. Had it belonged to the new [operators world](https://coreos.com/blog/introdu
it may have been named a "Kubernetes cluster operator". Kubespray however,
does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping.

Kubespray supports `kubeadm` for cluster creation since v2.3
(and deprecated non-kubeadm deployment starting from v2.8)
in order to consume life cycle management domain knowledge from it
and offload generic OS configuration things from it, which hopefully benefits both sides.
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-sigs/kubespray/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides.

@@ -1,28 +1,31 @@
CRI-O
cri-o
===============

[CRI-O] is a lightweight container runtime for Kubernetes.
Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster.
cri-o is container developed by kubernetes project.
Currently, only basic function is supported for cri-o.

* Kubernetes supports CRI-O on v1.11.1 or later.
* Helm and other tools may not function as normal due to dependency on Docker.
* `scale.yml` and `upgrade-cluster.yml` are not supported on clusters using CRI-O.
* cri-o is supported kubernetes 1.11.1 or later.
* helm and other feature may not be supported due to docker dependency.
* scale.yml and upgrade-cluster.yml are not supported.

_To use CRI-O instead of Docker, set the following variables:_
helm and other feature may not be supported due to docker dependency.

Use cri-o instead of docker, set following variable:

#### all.yml

```yaml
```
kubeadm_enabled: true
...
download_container: false
skip_downloads: false
```

#### k8s-cluster.yml

```yaml
```
etcd_deployment_type: host
kubelet_deployment_type: host
container_manager: crio
```

[CRI-O]: https://cri-o.io/

@@ -20,6 +20,10 @@ ndots value to be used in ``/etc/resolv.conf``

It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of the DNS stack, so please choose them wisely.
The dnsmasq DaemonSet can accept lower ``ndots`` values and return NXDOMAIN
replies for [bogus internal FQDNS](https://github.com/kubernetes/kubernetes/issues/19634#issuecomment-253948954)
before it even hits the kubedns app. This enables dnsmasq to serve as a
protective, but still recursive resolver in front of kubedns.

#### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
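
A minimal sketch of both variables set together in group_vars; the domains are hypothetical:

```yaml
# k8s-cluster/k8s-cluster.yml -- hypothetical search domains, keep the list short
ndots: 2
searchdomains:
  - corp.example.com
  - infra.example.com
```
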
@@ -37,7 +41,8 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8

#### upstream_dns_servers
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
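
For example, using the same public resolvers that appear in the sample inventory:

```yaml
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
```
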

DNS modes supported by Kubespray
============================

@@ -47,20 +52,32 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will set up cluster DNS. There are four modes available:

#### coredns (default)
This installs CoreDNS as the default cluster DNS for all queries.
#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``

#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.

#### coredns
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
all queries.

#### coredns_dual
This installs CoreDNS as the default cluster DNS for all queries, plus a secondary CoreDNS stack.
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
all queries. It will also deploy a secondary CoreDNS stack

#### manual
This does not install coredns, but allows you to specify
This does not install dnsmasq or kubedns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
Use this method if you plan to install your own DNS server in the cluster after
initial deployment.

#### none
This does not install any DNS solution at all. This basically disables cluster DNS completely and
This does not install any of dnsmasq and kubedns/skydns. This basically disables cluster DNS completely and
leaves you with a non functional cluster.

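A minimal sketch of the manual mode, assuming you will run your own resolver at a placeholder address:

```yaml
dns_mode: manual
manual_dns_server: 10.233.0.10  # placeholder; point this at your own in-cluster DNS server
```
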
## resolvconf_mode
@@ -86,7 +103,7 @@ The following dns options are added to the docker daemon
* attempts:2

For normal PODs, k8s will ignore these options and set up its own DNS settings for the PODs, taking
the --cluster_dns (either coredns or coredns_dual, depending on dns_mode) kubelet option into account.
the --cluster_dns (either dnsmasq or kubedns, depending on dns_mode) kubelet option into account.
For ``hostNetwork: true`` PODs however, k8s will let docker set up DNS settings. Docker containers which
are not started/managed by k8s will also use these docker options.

@@ -98,7 +115,7 @@ servers, which in turn will forward queries to the system nameserver if required

#### host_resolvconf
This activates the classic Kubespray behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either coredns or coredns_dual, depending on dns_mode).
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).

As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
stage (``dns_early: true``), ``/etc/resolv.conf`` is configured to use the DNS servers found in ``upstream_dns_servers``
@@ -113,11 +130,6 @@ Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
cluster service names.

## Nodelocal DNS cache
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the DNS (CoreDNS) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query kube-dns / core-dns (depending on which main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).

More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).

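Enabling the cache is a two-variable change; the link-local address below is the default shipped in the sample group_vars:

```yaml
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
```
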
Limitations
-----------

@@ -56,12 +56,22 @@ Add worker nodes to the list under kube-node if you want to delete them (or util
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key

Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.

We support two ways to select the nodes:

- Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key \
--extra-vars "node=nodename,nodename2"
```
or
- Use `--limit nodename,nodename2` to select the node
```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key \
--limit nodename,nodename2
```

Connecting to Kubernetes
------------------------

@@ -62,6 +62,34 @@ You can change the default configuration by overriding `kube_router_...` variabl
these are named to follow `kube-router` command-line options as per
<https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.

## Caveats

### kubeadm_enabled: true

If you want to set `kube-router` to replace `kube-proxy`
(`--run-service-proxy=true`) while using `kubeadm_enabled`,
then the `kube-proxy` DaemonSet will be removed *after* kubeadm finishes
running, as it's not possible to skip the kube-proxy install in kubeadm flags
and/or config, see https://github.com/kubernetes/kubeadm/issues/776.

Given the above, if `--run-service-proxy=true` is needed it would be
better to avoid `kubeadm_enabled`, i.e. set:

```
kubeadm_enabled: false
kube_router_run_service_proxy: true
```

If for some reason you do want/need to set `kubeadm_enabled`, removing
kube-proxy afterwards behaves better if kube-proxy is set to ipvs mode, i.e. set:

```
kubeadm_enabled: true
kube_router_run_service_proxy: true
kube_proxy_mode: ipvs
```

## Advanced BGP Capabilities
https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities

@@ -15,8 +15,8 @@ For large scaled deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed
push or download operations.

* Tune parameters for DNS related applications
Those are ``dns_replicas``, ``dns_cpu_limit``,
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests.

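A sketch of what such tuning might look like in group_vars; the numbers are illustrative only, not recommended defaults, and each limit stays greater than or equal to its request:

```yaml
dns_replicas: 2
dns_cpu_requests: 100m
dns_cpu_limit: 300m
dns_memory_requests: 70Mi
dns_memory_limit: 170Mi
```
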
@@ -1,4 +1,4 @@
openSUSE Leap 15.0 and Tumbleweed
openSUSE Leap 42.3 and Tumbleweed
===============

openSUSE Leap installation Notes:

@@ -1,97 +0,0 @@
Packet
===============

Kubespray provides support for bare metal deployments using the [Packet bare metal cloud](http://www.packet.com).
Deploying upon bare metal allows Kubernetes to run at locations where an existing public or private cloud might not exist, such
as cell towers or edge collocated installations. The deployment mechanism used by Kubespray for Packet is similar to that used for
AWS and OpenStack clouds (notably using Terraform to deploy the infrastructure). Terraform uses the Packet provider plugin
to provision and configure hosts which are then used by the Kubespray Ansible playbooks. The Ansible inventory is generated
dynamically from the Terraform state file.

## Local Host Configuration

To perform this installation, you will need a localhost to run Terraform/Ansible (laptop, VM, etc) and an account with Packet.
In this example, we're using an m1.large CentOS 7 OpenStack VM as the localhost to kick off the Kubernetes installation.
You'll need Ansible, Git, and PIP.

```bash
sudo yum install epel-release
sudo yum install ansible
sudo yum install git
sudo yum install python-pip
```

## Playbook SSH Key

An SSH key is needed by Kubespray/Ansible to run the playbooks.
This key is installed into the bare metal hosts during the Terraform deployment.
You can generate a new key or use an existing one.

```bash
ssh-keygen -f ~/.ssh/id_rsa
```

## Install Terraform

Terraform is required to deploy the bare metal infrastructure. The steps below are for installing on CentOS 7.
[More terraform installation options are available.](https://learn.hashicorp.com/terraform/getting-started/install.html)

Grab the latest version of Terraform and install it.
```bash
sudo yum install unzip jq
TERRAFORM_VERSION=$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')
wget "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip"
sudo unzip "terraform_${TERRAFORM_VERSION}_linux_amd64.zip" -d /usr/local/bin/
```

## Download Kubespray

Pull over Kubespray and set up any required libraries.

```bash
git clone https://github.com/kubernetes-sigs/kubespray
cd kubespray
sudo pip install -r requirements.txt
```

## Cluster Definition

In this example, a new cluster called "alpha" will be created.

```bash
cp -LRp contrib/terraform/packet/sample-inventory inventory/alpha
cd inventory/alpha/
ln -s ../../contrib/terraform/packet/hosts
```

Details about the cluster, such as the name, as well as the authentication tokens and project ID
for Packet need to be defined. To find these values see [Packet API Integration](https://support.packet.com/kb/articles/api-integrations)

```bash
vi cluster.tf
```
* cluster_name = alpha
* packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456
* public_key_path = 12345678-90AB-CDEF-GHIJ-KLMNOPQRSTUV

## Deploy Bare Metal Hosts

Initializing Terraform will pull down any necessary plugins/providers.

```bash
terraform init ../../contrib/terraform/packet/
```

Run Terraform to deploy the hardware.

```bash
terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
```

## Run Kubespray Playbooks

With the bare metal infrastructure deployed, Kubespray can now install Kubernetes and set up the cluster.

```bash
ansible-playbook --become -i inventory/alpha/hosts cluster.yml
```

@@ -1,6 +1,11 @@
Kubespray's roadmap
=================

### Kubeadm
- Switch to kubeadm deployment as the default method after some bugs are fixed:
* Support for basic auth
* cloudprovider cloud-config mount [#484](https://github.com/kubernetes/kubeadm/issues/484)

### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
@@ -9,12 +14,7 @@ Kubespray's roadmap
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)

### Provisioning and cloud providers
- [ ] Terraform to provision instances on:
- [ ] GCE
- [x] AWS (contrib/terraform/aws)
- [x] Openstack (contrib/terraform/openstack)
- [ ] Digital Ocean
- [ ] Azure
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
- [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
@@ -34,12 +34,12 @@ Kubespray's roadmap

### Networking
- [ ] Opencontrail
- [ ] Consolidate roles/network_plugin and roles/kubernetes-apps/network_plugin
- [ ] Consolidate network_plugins and kubernetes-apps/network_plugins

### Kubespray API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
- Store inventories / configurations of mulltiple clusters
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory

### Addons (helm or native ansible)
Include optionals deployments to init the cluster:
@@ -61,9 +61,10 @@ Include optionals deployments to init the cluster:
- Deis Workflow

### Others
- remove nodes (adding is already supported)
- Organize and update documentation (split in categories)
- Refactor downloads so it all runs in the beginning of deployment
- Make bootstrapping OS more consistent
- **consul** -> if officially supported by k8s
- Flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)

15
docs/vars.md
@@ -17,10 +17,12 @@ Some variables of note include:
* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to use (should be quoted
string). Must match one of the keys defined for *docker_versioned_pkg*
in `roles/container-engine/docker/vars/*.yml`.
string)
* *etcd_version* - Specify version of ETCD to use
* *ipip* - Enables Calico ipip encapsulation by default
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
resides
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes hyperkube version
@@ -59,6 +61,8 @@ following default cluster parameters:
overlap with kube_service_addresses.
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster.
* *dns_setup* - Enables dnsmasq
* *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
@@ -82,14 +86,15 @@ and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.

#### DNS variables

By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all
By default, dnsmasq gets set up with 8.8.8.8 as an upstream DNS server and all
other settings from your existing /etc/resolv.conf are lost. Set the following
variables to match your requirements.

* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts
* *nameservers* - Array of DNS servers configured for use in dnsmasq
* *searchdomains* - Array of up to 4 search domains
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)

For more information, see [DNS
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
@@ -113,8 +118,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *kubelet_cgroup_driver* - Allows manual override of the
cgroup-driver option for Kubelet. By default autodetection is used
to match Docker configuration.
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* must be defined as a dict:

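For example (hypothetical label keys and values):

```yaml
node_labels:
  environment: production
  rack: rack-42
```
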
@@ -14,9 +14,6 @@ After this step you should have:
- UUID activated for each VM where Kubernetes will be deployed
- A vSphere account with required privileges

If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes.

## Kubespray configuration

First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
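
A minimal sketch of that setting; the credential variables from the table below still need to be filled in separately:

```yaml
# inventory/sample/group_vars/all.yml
cloud_provider: vsphere
```
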
@@ -40,8 +37,6 @@ Then, in the same file, you need to declare your vCenter credential following th
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) |
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) |
| vsphere_zone_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0) |
| vsphere_region_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0) |

Example configuration

@@ -1,4 +1,3 @@
---
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
@@ -39,21 +38,21 @@
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, tags: preinstall }

- name: Handle upgrades to master components first to maintain backwards compat.
  hosts: kube-master
#Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: 1
  roles:
    - { role: kubespray-defaults}
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/master, tags: master, upgrade_cluster_setup: true }
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- name: Finally handle worker upgrades, based on given batch size
  hosts: kube-node:!kube-master
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  roles:

46
index.html
@@ -1,46 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <title>Kubespray - Deploy a Production Ready Kubernetes Cluster</title>
  <meta name="description" content="Deploy a Production Ready Kubernetes Cluster">
  <meta name="viewport" content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
  <link rel="stylesheet" href="//unpkg.com/docsify-themeable/dist/css/theme-simple.css">
  <style>
    :root {
      --base-font-size: 16px;
      --theme-color: rgb(104, 118, 52);
      --link-color: rgb(104, 118, 52);
      --link-color--hover: rgb(137, 152, 100);
      --sidebar-name-margin: 0;
      --sidebar-name-padding: 0;
      --code-font-size: .9em;
    }
    .sidebar > h1 {
      margin-bottom: -.75em;
      margin-top: .75em;
    }
    .markdown-section a code {
      color: var(--link-color)!important;
    }
    .markdown-section code:not([class*="lang-"]):not([class*="language-"]) {
      white-space: unset
    }
  </style>
</head>
<body>
  <div id="app"></div>
</body>
<script>
  window.$docsify = {
    name: 'Kubespray',
    loadSidebar: 'docs/_sidebar.md',
    repo: 'https://github.com/kubernetes-sigs/kubespray',
    auto2top: true,
  }
</script>
<script src="//unpkg.com/docsify/lib/docsify.min.js"></script>
<script src="//unpkg.com/docsify/lib/plugins/search.min.js"></script>
<script src="//unpkg.com/docsify/lib/plugins/ga.min.js"></script>

</html>

@@ -1,4 +1,3 @@
---
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd

@@ -10,33 +9,31 @@ bin_dir: /usr/local/bin
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1
#access_ip: 1.1.1.1

## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
#loadbalancer_apiserver_localhost: true

## Local loadbalancer should use this port
## And must be set port 6443
nginx_kube_apiserver_port: 6443
## If nginx_kube_apiserver_healthcheck_port variable defined, enables proxy liveness check.
nginx_kube_apiserver_healthcheck_port: 8081
## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443

### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
# kubelet_load_modules: false
#kubelet_load_modules: false

## Upstream dns servers
# upstream_dns_servers:
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4

@@ -44,46 +41,52 @@ nginx_kube_apiserver_healthcheck_port: 8081
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
## like you would do when using nova-client before starting the playbook.
## Note: The 'external' cloud provider is not supported.
## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
# cloud_provider:
#cloud_provider:

## kubeadm deployment mode
kubeadm_enabled: true

# Skip alert information
skip_non_kubeadm_warning: false

## Set these proxy values in order to update package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
#http_proxy: ""
#https_proxy: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""
#no_proxy: ""

## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False
#download_validate_certs: False

## If you need to exclude all cluster nodes from proxy and other resources, add other resources here.
# additional_no_proxy: ""
#additional_no_proxy: ""

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Option is "script", "none"
## note: vault is removed
# cert_management: script
#cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false
#ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255
#kube_read_only_port: 10255

## Set true to download and cache container
# download_container: true
#download_container: true

## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true
#deploy_container_engine: true

## Set Pypi repo and cert accordingly
# pyrepo_index: https://pypi.example.com/simple
# pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
#pyrepo_index: https://pypi.example.com/simple
#pyrepo_cert: /etc/ssl/certs/ca-certificates.crt

@@ -1,14 +1,14 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_vnet_resource_group:
#azure_route_table_name:

@@ -1,2 +1,2 @@
## Does coreos need auto upgrade, default is true
# coreos_auto_upgrade: true
#coreos_auto_upgrade: true

@@ -1,14 +1,13 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2
#docker_storage_options: -s overlay2

## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false

## You must define a disk path for docker_container_storage_setup_devs.
## Otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb
#docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
docker_dns_servers_strict: false
@@ -33,12 +32,12 @@ docker_rpm_keepcache: 0
## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## example define 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
#docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11

## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
#docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com

@@ -47,7 +46,7 @@ docker_rpm_keepcache: 0
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:
#docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
@@ -58,10 +57,10 @@ docker_options: >-
  {% if docker_registry_mirrors is defined %}
  {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
  {%- endif %}
  {%- if docker_version != "latest" and docker_version is version('17.05', '<') %}
  --graph={{ docker_daemon_graph }} {% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}
  {%- if docker_version is version('17.05', '<') %}
  --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
  {%- else %}
  --data-root={{ docker_daemon_graph }} {% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}
  --data-root={{ docker_daemon_graph }} {{ docker_log_opts }}
  {%- endif %}
  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current

@@ -1,28 +1,25 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
#oci_private_key:
#oci_region_id:
#oci_tenancy_id:
#oci_user_id:
#oci_user_fingerprint:
#oci_compartment_id:
#oci_vnc_id:
#oci_subnet1_id:
#oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
# oci_security_lists:
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
# rate_limit_qps_read:
# rate_limit_qps_write:
# rate_limit_bucket_read:
# rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
#oci_security_list_management: All
# If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
#oci_security_lists:
#ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
#ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
#oci_use_instance_principals: false
#oci_cloud_controller_version: 0.6.0
# If you would like to control OCI query rate limits for the controller
#oci_rate_limit:
#rate_limit_qps_read:
#rate_limit_qps_write:
#rate_limit_bucket_read:
#rate_limit_bucket_write:

@@ -1,16 +1,16 @@
# # When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
# openstack_blockstorage_ignore_volume_az: yes
# # When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
# openstack_lbaas_enabled: True
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
# # To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
# # Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
# openstack_lbaas_create_monitor: "yes"
# openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
#openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
#openstack_lbaas_use_octavia: False
#openstack_lbaas_method: "ROUND_ROBIN"
#openstack_lbaas_provider: "haproxy"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

@@ -1,18 +1,18 @@
## Etcd auto compaction retention for mvcc key value store in hour
# etcd_compaction_retention: 0
#etcd_compaction_retention: 0

## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
# etcd_metrics: basic
#etcd_metrics: basic

## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
# etcd_memory_limit: "512M"
#etcd_memory_limit: "512M"

## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
# etcd_quota_backend_bytes: "2G"
#etcd_quota_backend_bytes: "2G"

### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true
#etcd_peer_client_auth: true

@@ -1,4 +1,3 @@
---
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
@@ -18,31 +17,22 @@ metrics_server_enabled: false
# metrics_server_metric_resolution: 60s
# metrics_server_kubelet_preferred_address_types: "InternalIP"

# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.2"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_storage_classes:
# local-storage:
# host_dir: /mnt/disks
# mount_dir: /mnt/disks
# fast-disks:
# host_dir: /mnt/fast-disks
# mount_dir: /mnt/fast-disks
# block_cleaner_command:
# - "/scripts/shred.sh"
# - "2"
# volume_mode: Filesystem
# fs_type: ext4
# - name: "{{ local_volume_provisioner_storage_class | default('local-storage') }}"
# host_dir: "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}"
# mount_dir: "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}"
# - name: "local-ssd"
# host_dir: "/mnt/local-storage/ssd"
# mount_dir: "/mnt/local-storage/ssd"
# - name: "local-hdd"
# host_dir: "/mnt/local-storage/hdd"
# mount_dir: "/mnt/local-storage/hdd"
# - name: "local-shared"
# host_dir: "/mnt/local-storage/shared"
# mount_dir: "/mnt/local-storage/shared"

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
@@ -60,11 +50,11 @@ cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
# node-role.kubernetes.io/node: ""
# node-role.kubernetes.io/master: ""
# ingress_nginx_tolerations:
# - key: "node-role.kubernetes.io/master"
# - key: "key"
# operator: "Equal"
# value: ""
# value: "value"
# effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80

@@ -1,4 +1,3 @@
---
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
@@ -20,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.13.5
kube_version: v1.12.7

# kubernetes image repo define
kube_image_repo: "gcr.io/google-containers"
@@ -52,9 +51,9 @@ kube_users:
- system:masters

## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
# kube_oidc_auth: false
# kube_basic_auth: false
# kube_token_auth: false
#kube_oidc_auth: false
#kube_basic_auth: false
#kube_token_auth: false

## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
@@ -74,9 +73,6 @@ kube_users:
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

@@ -93,7 +89,7 @@ kube_network_node_prefix: 24
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
# kube_apiserver_insecure_port: 8080 # (http)
#kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0 # (disabled)

@@ -101,23 +97,10 @@ kube_apiserver_insecure_port: 0 # (disabled)
# Can be ipvs, iptables
kube_proxy_mode: ipvs

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}

# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
# {%- else -%}
# {{ inventory_hostname }}
# {%- endif -%}
# Kube-proxy nodeport address.
# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest
kube_proxy_nodeport_addresses: false
# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false
@@ -127,13 +110,10 @@ kube_encrypt_secret_data: false
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be coredns, coredns_dual, manual or none
# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: False
nodelocaldns_ip: 169.254.25.10
#manual_dns_server: 10.x.x.x

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
@@ -142,6 +122,7 @@ deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

## Container runtime
@@ -163,7 +144,7 @@ kubernetes_audit: false
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
#kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

@@ -175,6 +156,10 @@ podsecuritypolicy_enabled: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8

# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true
@@ -206,8 +191,3 @@ persistent_volumes_enabled: false
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"

@@ -3,7 +3,7 @@
## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
# peer_with_router: false
#peer_with_router: false

# Enables Internet connectivity from containers
# nat_outgoing: true
@@ -11,9 +11,6 @@
# add default ippool name
# calico_pool_name: "default-pool"

# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5

# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

@@ -21,6 +18,3 @@
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500

# Advertise Cluster IPs
# calico_advertise_cluster_ips: true

@@ -8,3 +8,4 @@
# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"

@@ -6,9 +6,9 @@
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
# contiv_peer_with_uplink_leaf: false
# contiv_global_as: "65002"
# contiv_global_neighbor_as: "500"
#contiv_peer_with_uplink_leaf: false
#contiv_global_as: "65002"
#contiv_global_neighbor_as: "500"

# Fabric mode: aci, aci-opflex or default
# contiv_fabric_mode: default

@@ -19,9 +19,6 @@
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false

# Adjust manifest of kube-router daemonset template with DSR needed changes
# kube_router_enable_dsr: false

# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# kube_router_extra_args: []

@@ -71,13 +71,6 @@ options:
      latest handles creating or updating based on existence,
      reloaded handles updating resource(s) definition using definition file,
      stopped handles stopping resource(s) based on other options.
  recursive:
    required: false
    default: false
    description:
      - Process the directory used in -f, --filename recursively.
        Useful when you want to manage related manifests organized
        within the same directory.
requirements:
  - kubectl
author: "Kenny Jones (@kenjones-cisco)"
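
A hypothetical task using this bundled `kube` module with the `recursive` option could look like the following; the manifest directory is a placeholder:

```yaml
- name: Apply all manifests under a directory tree
  kube:
    filename: /etc/kubernetes/addons   # placeholder directory
    state: latest
    recursive: true
```
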
@@ -127,14 +120,12 @@ class KubeManager(object):
        if module.params.get('namespace'):
            self.base_cmd.append('--namespace=' + module.params.get('namespace'))

        self.all = module.params.get('all')
        self.force = module.params.get('force')
        self.name = module.params.get('name')
        self.filename = [f.strip() for f in module.params.get('filename') or []]
        self.resource = module.params.get('resource')
        self.label = module.params.get('label')
        self.recursive = module.params.get('recursive')

    def _execute(self, cmd):
        args = self.base_cmd + cmd
@@ -164,9 +155,6 @@ class KubeManager(object):
        if force:
            cmd.append('--force')

        if self.recursive:
            cmd.append('--recursive={}'.format(self.recursive))

        if not self.filename:
            self.module.fail_json(msg='filename required to create')
@@ -181,9 +169,6 @@ class KubeManager(object):
        if force:
            cmd.append('--force')

        if self.recursive:
            cmd.append('--recursive={}'.format(self.recursive))

        if not self.filename:
            self.module.fail_json(msg='filename required to reload')
@@ -200,8 +185,6 @@ class KubeManager(object):

        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
            if self.recursive:
                cmd.append('--recursive={}'.format(self.recursive))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to delete without filename')
@@ -220,9 +203,6 @@ class KubeManager(object):
        if self.force:
            cmd.append('--ignore-not-found')

        if self.recursive:
            cmd.append('--recursive={}'.format(self.recursive))

        return self._execute(cmd)

    def exists(self):
@@ -230,8 +210,6 @@ class KubeManager(object):

        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
            if self.recursive:
                cmd.append('--recursive={}'.format(self.recursive))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required without filename')
@@ -264,8 +242,6 @@ class KubeManager(object):

        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
            if self.recursive:
                cmd.append('--recursive={}'.format(self.recursive))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to stop without filename')
@@ -302,7 +278,6 @@ def main():
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
            recursive=dict(default=False, type='bool'),
        ),
        mutually_exclusive=[['filename', 'list']]
    )

@@ -1,4 +1,3 @@
---
- hosts: localhost
  strategy: linear
  vars:

@@ -1,12 +1,12 @@
---
- hosts: localhost
  become: no
  tasks:
    - name: "Check ansible version >=2.7.6"
    - name: "Check ansible version !=2.7.0"
      assert:
        msg: "Ansible must be v2.7.6 or higher"
        msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed"
        that:
          - ansible_version.string is version("2.7.6", ">=")
          - ansible_version.string is version("2.7.0", "!=")
          - ansible_version.string is version("2.5.0", ">=")
      tags:
        - check
  vars:

@@ -1,7 +1,5 @@
ansible>=2.7.6
ansible>=2.5.0,!=2.7.0
jinja2>=2.9.6
netaddr
pbr>=1.6
hvac
jmespath
ruamel.yaml

14
reset.yml
@@ -1,23 +1,17 @@
---
- hosts: localhost
  become: no
  tasks:
    - name: "Check ansible version >=2.7.6"
    - name: "Check ansible version !=2.7.0"
      assert:
        msg: "Ansible must be v2.7.6 or higher"
        msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed"
        that:
          - ansible_version.string is version("2.7.6", ">=")
          - ansible_version.string is version("2.7.0", "!=")
          - ansible_version.string is version("2.5.0", ">=")
      tags:
        - check
  vars:
    ansible_connection: local

- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: all
  gather_facts: true
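The reworked play order above (version gate on localhost, optional bastion SSH config, then all hosts) is exercised with the usual invocation; the inventory path and confirmation variable below are examples, assuming the standard reset prompt:

    # Tear the cluster down; reset.yml normally asks for confirmation,
    # which can be pre-answered on the command line.
    ansible-playbook -i inventory/mycluster/hosts.ini reset.yml -e reset_confirmation=yes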
@@ -1,16 +1,10 @@
---
bootstrap_os:
os_family: "{{bootstrap_os}}"

pip_python_coreos_modules:
  - httplib2
  - six

override_system_hostname: true
coreos_auto_upgrade: true

# Install epel repo on Centos/RHEL
epel_enabled: false

# CentOS/RedHat Extras repo
extras_rh_repo_base_url: "http://mirror.centos.org/centos/$releasever/extras/$basearch/"
extras_rh_repo_gpgkey: "http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7"
# Caching extras packages after installation
extras_rh_rpm_keepcache: 0
21
roles/bootstrap-os/files/bootstrap.sh
Executable file → Normal file
@@ -11,12 +11,25 @@ if [[ -e $BINDIR/.bootstrapped ]]; then
  exit 0
fi

PYPY_VERSION=7.0.0
PYPY_VERSION=5.1.0

wget -O - https://bitbucket.org/squeaky/portable-pypy/downloads/pypy3.5-$PYPY_VERSION-linux_x86_64-portable.tar.bz2 | tar -xjf -
mv -n pypy3.5-$PYPY_VERSION-linux_x86_64-portable pypy3
wget -O - https://bitbucket.org/pypy/pypy/downloads/pypy-$PYPY_VERSION-linux64.tar.bz2 | tar -xjf -
mv -n pypy-$PYPY_VERSION-linux64 pypy

ln -s ./pypy3/bin/pypy3 python
## library fixup
mkdir -p pypy/lib
if [ -f /lib64/libncurses.so.5.9 ]; then
  ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
elif [ -f /lib64/libncurses.so.6.1 ]; then
  ln -snf /lib64/libncurses.so.6.1 $BINDIR/pypy/lib/libtinfo.so.5
fi

cat > $BINDIR/python <<EOF
#!/bin/bash
LD_LIBRARY_PATH=$BINDIR/pypy/lib:$LD_LIBRARY_PATH exec $BINDIR/pypy/bin/pypy "\$@"
EOF

chmod +x $BINDIR/python
$BINDIR/python --version

touch $BINDIR/.bootstrapped
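After the script runs, the deployed interpreter should answer under $BINDIR (set earlier in the script; /opt/bin is assumed here) and can be handed to Ansible. A quick sanity check, as a sketch:

    # Verify the wrapper starts and reports the PyPy build.
    /opt/bin/python --version

    # Point Ansible at the bootstrapped interpreter on the target host.
    ansible -i inventory -m ping -e ansible_python_interpreter=/opt/bin/python all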
19017
roles/bootstrap-os/files/get-pip.py
Normal file
File diff suppressed because it is too large
3
roles/bootstrap-os/files/runner
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash
BINDIR="/opt/bin"
LD_LIBRARY_PATH=$BINDIR/pypy/lib:$LD_LIBRARY_PATH $BINDIR/pypy/bin/$(basename $0) $@
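The new runner dispatches on its own basename, so a single copy can front any tool shipped in pypy/bin. Intended use, sketched (the pip symlink is an example):

    # Symlink the wrapper under the name of the target tool...
    ln -s /opt/bin/runner /opt/bin/pip
    # ...then $(basename $0) resolves to "pip" and the wrapper runs
    # /opt/bin/pypy/bin/pip with the library path set.
    /opt/bin/pip --version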
@@ -1,11 +1,4 @@
---
- name: Check if atomic host
  stat:
    path: /run/ostree-booted
  register: ostree

- set_fact:
    is_atomic: "{{ ostree.stat.exists }}"

- name: Check presence of fastestmirror.conf
  stat:
@@ -19,72 +12,26 @@
    regexp: "^enabled=.*"
    line: "enabled=0"
    state: present
  become: true
  when: fastestmirror.stat.exists

- name: Add proxy to /etc/yum.conf if http_proxy is defined
  lineinfile:
    path: "/etc/yum.conf"
    line: "proxy={{ http_proxy }}"
    line: "proxy={{http_proxy}}"
    create: yes
    state: present
  become: true
  when: http_proxy is defined

- name: Install libselinux-python and yum-utils for bootstrap
- name: Install packages requirements for bootstrap
  yum:
    name:
    name: "{{ packages }}"
    state: present
  vars:
    packages:
      - libselinux-python
      - yum-utils
    state: present
  become: true
  when:
    - not is_atomic

- name: Check python-pip package
  yum:
    list=python-pip
  register: package_python_pip
  when:
    - not is_atomic

- name: Install epel-release for bootstrap
  yum:
    name: epel-release
    state: present
  become: true
  when:
    - epel_enabled
    - not is_atomic
    - package_python_pip.results | length != 0

- name: check python-httplib2 package
  yum:
    list: "python-httplib2"
  register: package_python_httplib2
  when:
    - not is_atomic

- name: Configure extras repository if python-httplib2 not available in current repos
  yum_repository:
    name: extras
    description: "CentOS-7 - Extras"
    state: present
    baseurl: "{{ extras_rh_repo_base_url }}"
    file: "extras"
    gpgcheck: yes
    gpgkey: "{{extras_rh_repo_gpgkey}}"
    keepcache: "{{ extras_rh_rpm_keepcache | default('1') }}"
    proxy: " {{ http_proxy | default('_none_') }}"
  when:
    - not is_atomic
    - package_python_httplib2.results | length == 0
    - epel-release

- name: Install pip for bootstrap
  yum:
    name: python-pip
    state: present
  become: true
  when:
    - not is_atomic
    - package_python_pip.results | length != 0
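The `yum: list=python-pip` pattern above only queries repository metadata and installs nothing; the later tasks are gated on whether that query returned results. The manual equivalent of the check, for illustration:

    # Empty output / non-zero exit means the package is not in any enabled
    # repo -- the case in which the extras repository gets configured.
    yum list available python-pip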
@@ -1,15 +0,0 @@
---
- name: Install basic packages to run containers
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - containers-basic

- name: Make sure docker service is enabled
  systemd:
    name: docker
    enabled: yes
    daemon_reload: yes
    state: started
  become: true
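For reference, the removed play amounted to installing the container bundle and enabling docker; done by hand on a Clear Linux host it would look roughly like this (assuming swupd is the backend the generic `package` module resolved to):

    # Clear Linux ships containers-basic as a swupd bundle.
    swupd bundle-add containers-basic
    # Enable and start docker, mirroring the removed systemd task.
    systemctl enable --now docker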
Some files were not shown because too many files have changed in this diff