mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-04 08:48:42 +03:00
Compare commits
3 Commits
v2.4.0
...
test-tag-1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a222be7fae | ||
|
|
9d43cd86be | ||
|
|
6ed99f1f44 |
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -24,7 +24,7 @@ explain why.
|
||||
- **Version of Ansible** (`ansible --version`):
|
||||
|
||||
|
||||
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
|
||||
**Kargo version (commit) (`git rev-parse --short HEAD`):**
|
||||
|
||||
|
||||
**Network plugin used**:
|
||||
|
||||
87
.gitignore
vendored
87
.gitignore
vendored
@@ -1,95 +1,14 @@
|
||||
.vagrant
|
||||
*.retry
|
||||
inventory/vagrant_ansible_inventory
|
||||
inventory/group_vars/fake_hosts.yml
|
||||
inventory/host_vars/
|
||||
temp
|
||||
.idea
|
||||
.tox
|
||||
.cache
|
||||
*.bak
|
||||
*.egg-info
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.tfstate
|
||||
*.tfstate.backup
|
||||
contrib/terraform/aws/credentials.tfvars
|
||||
**/*.sw[pon]
|
||||
/ssh-bastion.conf
|
||||
**/*.sw[pon]
|
||||
vagrant/
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
artifacts/
|
||||
env/
|
||||
build/
|
||||
credentials/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*,cover
|
||||
.hypothesis/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# IPython Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# dotenv
|
||||
.env
|
||||
|
||||
# virtualenv
|
||||
venv/
|
||||
ENV/
|
||||
|
||||
387
.gitlab-ci.yml
387
.gitlab-ci.yml
@@ -18,8 +18,12 @@ variables:
|
||||
# us-west1-a
|
||||
|
||||
before_script:
|
||||
- pip install -r tests/requirements.txt
|
||||
- pip install ansible==2.2.1.0
|
||||
- pip install netaddr
|
||||
- pip install apache-libcloud==0.20.1
|
||||
- pip install boto==2.9.0
|
||||
- mkdir -p /.ssh
|
||||
- cp tests/ansible.cfg .
|
||||
|
||||
.job: &job
|
||||
tags:
|
||||
@@ -39,20 +43,18 @@ before_script:
|
||||
GCE_USER: travis
|
||||
SSH_USER: $GCE_USER
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||
CONTAINER_ENGINE: docker
|
||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
CLOUD_MACHINE_TYPE: "g1-small"
|
||||
GCE_PREEMPTIBLE: "false"
|
||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
||||
BOOTSTRAP_OS: none
|
||||
IDEMPOT_CHECK: "false"
|
||||
RESET_CHECK: "false"
|
||||
UPGRADE_TEST: "false"
|
||||
KUBEADM_ENABLED: "false"
|
||||
RESOLVCONF_MODE: docker_dns
|
||||
LOG_LEVEL: "-vv"
|
||||
ETCD_DEPLOYMENT: "docker"
|
||||
KUBELET_DEPLOYMENT: "docker"
|
||||
WEAVE_CPU_LIMIT: "100m"
|
||||
MAGIC: "ci check this"
|
||||
|
||||
.gce: &gce
|
||||
@@ -65,249 +67,241 @@ before_script:
|
||||
- $HOME/.cache
|
||||
before_script:
|
||||
- docker info
|
||||
- pip install -r tests/requirements.txt
|
||||
- pip install ansible==2.2.1.0
|
||||
- pip install netaddr
|
||||
- pip install apache-libcloud==0.20.1
|
||||
- pip install boto==2.9.0
|
||||
- mkdir -p /.ssh
|
||||
- cp tests/ansible.cfg .
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- ansible-playbook --version
|
||||
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
- echo "CI_JOB_NAME is $CI_JOB_NAME"
|
||||
- echo "PYPATH is $PYPATH"
|
||||
- cp tests/ansible.cfg .
|
||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
script:
|
||||
- pwd
|
||||
- ls
|
||||
- echo ${PWD}
|
||||
- echo "${STARTUP_SCRIPT}"
|
||||
- >
|
||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
||||
${LOG_LEVEL}
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e preemptible=$GCE_PREEMPTIBLE
|
||||
|
||||
# Check out latest tag if testing upgrade
|
||||
# Uncomment when gitlab kargo repo has tags
|
||||
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
|
||||
# Checkout the CI vars file so it is available
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
|
||||
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
|
||||
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
|
||||
|
||||
|
||||
# Create cluster
|
||||
- >
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e ansible_ssh_user=${SSH_USER}
|
||||
-e ansible_ssh_user=${SSH_USER}
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-e deploy_netchecker=true
|
||||
-e download_localhost=true
|
||||
-e download_run_once=true
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
--limit "all:!fake_hosts"
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
||||
cluster.yml
|
||||
|
||||
# Repeat deployment if testing upgrade
|
||||
- >
|
||||
if [ "${UPGRADE_TEST}" != "false" ]; then
|
||||
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
||||
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
||||
git checkout "${CI_BUILD_REF}";
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e ansible_ssh_user=${SSH_USER}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
--limit "all:!fake_hosts"
|
||||
$PLAYBOOK;
|
||||
fi
|
||||
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
|
||||
## Ping the between 2 pod
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
|
||||
## Advanced DNS checks
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
|
||||
## Idempotency checks 1/5 (repeat deployment)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
--limit "all:!fake_hosts"
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e download_run_once=true
|
||||
-e download_localhost=true
|
||||
-e deploy_netchecker=true
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
cluster.yml;
|
||||
fi
|
||||
|
||||
## Idempotency checks 2/5 (Advanced DNS checks)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
--limit "all:!fake_hosts"
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||
fi
|
||||
|
||||
## Idempotency checks 3/5 (reset deployment)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e reset_confirmation=yes
|
||||
--limit "all:!fake_hosts"
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
reset.yml;
|
||||
fi
|
||||
|
||||
## Idempotency checks 4/5 (redeploy after reset)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i inventory/inventory.ini
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
--limit "all:!fake_hosts"
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e download_run_once=true
|
||||
-e download_localhost=true
|
||||
-e deploy_netchecker=true
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
cluster.yml;
|
||||
fi
|
||||
|
||||
## Idempotency checks 5/5 (Advanced DNS checks)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||
--limit "all:!fake_hosts"
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||
fi
|
||||
|
||||
after_script:
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e @${CI_TEST_VARS}
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Test matrix. Leave the comments for markup scripts.
|
||||
.coreos_calico_aio_variables: &coreos_calico_aio_variables
|
||||
.coreos_calico_sep_variables: &coreos_calico_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
|
||||
CLOUD_REGION: us-west1-b
|
||||
CLUSTER_MODE: separate
|
||||
BOOTSTRAP_OS: coreos
|
||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||
|
||||
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
||||
.debian8_canal_ha_variables: &debian8_canal_ha_variables
|
||||
# stage: deploy-gce-part1
|
||||
UPGRADE_TEST: "graceful"
|
||||
|
||||
.centos_weave_kubeadm_variables: ¢os_weave_kubeadm_variables
|
||||
# stage: deploy-gce-part1
|
||||
UPGRADE_TEST: "graceful"
|
||||
|
||||
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: debian-8-kubespray
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: ha
|
||||
|
||||
.rhel7_weave_variables: &rhel7_weave_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.centos7_flannel_addons_variables: ¢os7_flannel_addons_variables
|
||||
.centos7_flannel_variables: ¢os7_flannel_variables
|
||||
# stage: deploy-gce-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.debian8_calico_variables: &debian8_calico_variables
|
||||
# stage: deploy-gce-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: debian-8-kubespray
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: default
|
||||
|
||||
.coreos_canal_variables: &coreos_canal_variables
|
||||
# stage: deploy-gce-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: default
|
||||
BOOTSTRAP_OS: coreos
|
||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||
IDEMPOT_CHECK: "true"
|
||||
|
||||
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: separate
|
||||
|
||||
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separate
|
||||
IDEMPOT_CHECK: "false"
|
||||
|
||||
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: ha
|
||||
IDEMPOT_CHECK: "true"
|
||||
|
||||
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: coreos-alpha
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLUSTER_MODE: ha
|
||||
BOOTSTRAP_OS: coreos
|
||||
|
||||
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_flannel_variables: &ubuntu_flannel_variables
|
||||
# stage: deploy-gce-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separate
|
||||
ETCD_DEPLOYMENT: rkt
|
||||
KUBELET_DEPLOYMENT: rkt
|
||||
|
||||
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||
coreos-calico-aio:
|
||||
coreos-calico-sep:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_aio_variables
|
||||
<<: *coreos_calico_sep_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
@@ -318,28 +312,28 @@ coreos-calico-sep-triggers:
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_aio_variables
|
||||
<<: *coreos_calico_sep_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos7-flannel-addons:
|
||||
centos7-flannel:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_addons_variables
|
||||
<<: *centos7_flannel_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
centos7-flannel-addons-triggers:
|
||||
centos7-flannel-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_addons_variables
|
||||
<<: *centos7_flannel_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
@@ -365,80 +359,27 @@ ubuntu-weave-sep-triggers:
|
||||
only: ['triggers']
|
||||
|
||||
# More builds for PRs/merges (manual) and triggers (auto)
|
||||
ubuntu-canal-ha:
|
||||
debian8-canal-ha:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_ha_variables
|
||||
<<: *debian8_canal_ha_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-canal-ha-triggers:
|
||||
debian8-canal-ha-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_ha_variables
|
||||
<<: *debian8_canal_ha_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
ubuntu-canal-kubeadm:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_kubeadm_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-canal-kubeadm-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_kubeadm_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos-weave-kubeadm:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos_weave_kubeadm_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
centos-weave-kubeadm-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos_weave_kubeadm_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
ubuntu-contiv-sep:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_contiv_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
rhel7-weave:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
@@ -460,7 +401,7 @@ rhel7-weave-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
debian8-calico-upgrade:
|
||||
debian8-calico:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
@@ -567,28 +508,6 @@ ubuntu-rkt-sep:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-vault-sep:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_vault_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-flannel-sep:
|
||||
stage: deploy-gce-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_flannel_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
# Premoderated with manual actions
|
||||
ci-authorized:
|
||||
<<: *job
|
||||
@@ -598,22 +517,12 @@ ci-authorized:
|
||||
script:
|
||||
- /bin/sh scripts/premoderator.sh
|
||||
except: ['triggers', 'master']
|
||||
|
||||
|
||||
syntax-check:
|
||||
<<: *job
|
||||
stage: unit-tests
|
||||
script:
|
||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
|
||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
|
||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
|
||||
except: ['triggers', 'master']
|
||||
|
||||
yamllint:
|
||||
<<: *job
|
||||
stage: unit-tests
|
||||
script:
|
||||
- yamllint roles
|
||||
except: ['triggers', 'master']
|
||||
|
||||
tox-inventory-builder:
|
||||
|
||||
161
.travis.yml.bak
Normal file
161
.travis.yml.bak
Normal file
@@ -0,0 +1,161 @@
|
||||
sudo: required
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
git:
|
||||
depth: 5
|
||||
|
||||
env:
|
||||
global:
|
||||
GCE_USER=travis
|
||||
SSH_USER=$GCE_USER
|
||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
||||
CONTAINER_ENGINE=docker
|
||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
||||
GS_ACCESS_KEY_ID=$GS_KEY
|
||||
GS_SECRET_ACCESS_KEY=$GS_SECRET
|
||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
||||
CLUSTER_MODE=default
|
||||
BOOTSTRAP_OS=none
|
||||
matrix:
|
||||
# Debian Jessie
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=asia-east1-a
|
||||
CLUSTER_MODE=ha
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=debian-8-kubespray
|
||||
CLOUD_REGION=europe-west1-c
|
||||
CLUSTER_MODE=default
|
||||
|
||||
# Centos 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=flannel
|
||||
CLOUD_IMAGE=centos-7
|
||||
CLOUD_REGION=asia-northeast1-c
|
||||
CLUSTER_MODE=default
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=centos-7
|
||||
CLOUD_REGION=us-central1-b
|
||||
CLUSTER_MODE=ha
|
||||
|
||||
# Redhat 7
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=rhel-7
|
||||
CLOUD_REGION=us-east1-c
|
||||
CLUSTER_MODE=default
|
||||
|
||||
# CoreOS stable
|
||||
#- >-
|
||||
# KUBE_NETWORK_PLUGIN=weave
|
||||
# CLOUD_IMAGE=coreos-stable
|
||||
# CLOUD_REGION=europe-west1-b
|
||||
# CLUSTER_MODE=ha
|
||||
# BOOTSTRAP_OS=coreos
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=coreos-stable
|
||||
CLOUD_REGION=us-west1-b
|
||||
CLUSTER_MODE=default
|
||||
BOOTSTRAP_OS=coreos
|
||||
|
||||
# Extra cases for separated roles
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=canal
|
||||
CLOUD_IMAGE=rhel-7
|
||||
CLOUD_REGION=asia-northeast1-b
|
||||
CLUSTER_MODE=separate
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=weave
|
||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
||||
CLOUD_REGION=europe-west1-d
|
||||
CLUSTER_MODE=separate
|
||||
- >-
|
||||
KUBE_NETWORK_PLUGIN=calico
|
||||
CLOUD_IMAGE=coreos-stable
|
||||
CLOUD_REGION=us-central1-f
|
||||
CLUSTER_MODE=separate
|
||||
BOOTSTRAP_OS=coreos
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
|
||||
|
||||
before_install:
|
||||
# Install Ansible.
|
||||
- pip install --user ansible
|
||||
- pip install --user netaddr
|
||||
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
|
||||
- pip install --user apache-libcloud==0.20.1
|
||||
- pip install --user boto==2.9.0 -U
|
||||
# Load cached docker images
|
||||
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
|
||||
|
||||
cache:
|
||||
- directories:
|
||||
- $HOME/.cache/pip
|
||||
- $HOME/.local
|
||||
- /var/tmp/releases
|
||||
|
||||
before_script:
|
||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
||||
- $HOME/.local/bin/ansible-playbook --version
|
||||
- cp tests/ansible.cfg .
|
||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
||||
|
||||
script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
|
||||
# Create cluster with netchecker app deployed
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e download_run_once=true
|
||||
-e download_localhost=true
|
||||
-e local_release_dir=/var/tmp/releases
|
||||
-e deploy_netchecker=true
|
||||
cluster.yml
|
||||
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
## Ping the between 2 pod
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
## Advanced DNS checks
|
||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
|
||||
after_script:
|
||||
- >
|
||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_pem_file=${HOME}/.ssh/gce
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
16
.yamllint
16
.yamllint
@@ -1,16 +0,0 @@
|
||||
---
|
||||
extends: default
|
||||
|
||||
rules:
|
||||
braces:
|
||||
min-spaces-inside: 0
|
||||
max-spaces-inside: 1
|
||||
brackets:
|
||||
min-spaces-inside: 0
|
||||
max-spaces-inside: 1
|
||||
indentation:
|
||||
spaces: 2
|
||||
indent-sequences: consistent
|
||||
line-length: disable
|
||||
new-line-at-end-of-file: disable
|
||||
truthy: disable
|
||||
65
README.md
65
README.md
@@ -1,8 +1,8 @@
|
||||

|
||||
|
||||
## Deploy a production ready kubernetes cluster
|
||||
##Deploy a production ready kubernetes cluster
|
||||
|
||||
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **#kubespray**.
|
||||
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
|
||||
|
||||
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
||||
- **High available** cluster
|
||||
@@ -13,29 +13,26 @@ If you have questions, join us on the [kubernetes slack](https://kubernetes.slac
|
||||
|
||||
To deploy the cluster you can use :
|
||||
|
||||
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
|
||||
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
|
||||
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
||||
|
||||
|
||||
* [Requirements](#requirements)
|
||||
* [Kubespray vs ...](docs/comparisons.md)
|
||||
* [Kargo vs ...](docs/comparisons.md)
|
||||
* [Getting started](docs/getting-started.md)
|
||||
* [Ansible inventory and tags](docs/ansible.md)
|
||||
* [Integration with existing ansible repo](docs/integration.md)
|
||||
* [Deployment data variables](docs/vars.md)
|
||||
* [DNS stack](docs/dns-stack.md)
|
||||
* [HA mode](docs/ha-mode.md)
|
||||
* [Network plugins](#network-plugins)
|
||||
* [Vagrant install](docs/vagrant.md)
|
||||
* [CoreOS bootstrap](docs/coreos.md)
|
||||
* [Debian Jessie setup](docs/debian.md)
|
||||
* [Downloaded artifacts](docs/downloads.md)
|
||||
* [Cloud providers](docs/cloud.md)
|
||||
* [OpenStack](docs/openstack.md)
|
||||
* [AWS](docs/aws.md)
|
||||
* [Azure](docs/azure.md)
|
||||
* [vSphere](docs/vsphere.md)
|
||||
* [Large deployments](docs/large-deployments.md)
|
||||
* [Upgrades basics](docs/upgrades.md)
|
||||
* [Roadmap](docs/roadmap.md)
|
||||
@@ -53,20 +50,16 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
Versions of supported components
|
||||
--------------------------------
|
||||
|
||||
|
||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 <br>
|
||||
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
|
||||
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
|
||||
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
|
||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
|
||||
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
|
||||
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
|
||||
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
|
||||
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||
[contiv](https://github.com/contiv/install/releases) v1.0.3 <br>
|
||||
[weave](http://weave.works/) v2.0.1 <br>
|
||||
[docker](https://www.docker.com/) v1.13 (see note)<br>
|
||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
|
||||
[weave](http://weave.works/) v1.6.1 <br>
|
||||
[docker](https://www.docker.com/) v1.12.5 <br>
|
||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
|
||||
|
||||
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||
|
||||
Note 2: rkt support as docker alternative is limited to control plane (etcd and
|
||||
Note: rkt support as docker alternative is limited to control plane (etcd and
|
||||
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
||||
plugins' related OS services. Also note, only one of the supported network
|
||||
plugins can be deployed for a given single cluster.
|
||||
@@ -74,19 +67,16 @@ plugins can be deployed for a given single cluster.
|
||||
Requirements
|
||||
--------------
|
||||
|
||||
* **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
|
||||
that will run Ansible commands**
|
||||
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
||||
* The target servers are configured to allow **IPv4 forwarding**.
|
||||
* **Your ssh key must be copied** to all the servers part of your inventory.
|
||||
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||
in order to avoid any issue during deployment you should disable your firewall.
|
||||
* The target servers are configured to allow **IPv4 forwarding**.
|
||||
* **Copy your ssh keys** to all the servers part of your inventory.
|
||||
* **Ansible v2.2 (or newer) and python-netaddr**
|
||||
|
||||
|
||||
## Network plugins
|
||||
|
||||
You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||
You can choose between 4 network plugins. (default: `flannel` with vxlan backend)
|
||||
|
||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||
|
||||
@@ -94,33 +84,18 @@ You can choose between 4 network plugins. (default: `calico`, except Vagrant use
|
||||
|
||||
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||
|
||||
* [**contiv**](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
|
||||
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
|
||||
|
||||
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||
|
||||
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||
option to leverage built-in cloud provider networking instead.
|
||||
See also [Network checker](docs/netcheck.md).
|
||||
|
||||
## Community docs and resources
|
||||
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
|
||||
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
||||
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
||||
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||
|
||||
## Tools and projects on top of Kubespray
|
||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
|
||||
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
|
||||
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||
|
||||
## CI Tests
|
||||
|
||||

|
||||
|
||||
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
|
||||
[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
|
||||
|
||||
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
||||
CI/end-to-end tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
|
||||
See the [test matrix](docs/test_cases.md) for details.
|
||||
|
||||
23
RELEASE.md
23
RELEASE.md
@@ -1,16 +1,16 @@
|
||||
# Release Process
|
||||
|
||||
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||
The Kargo Project is released on an as-needed basis. The process is as follows:
|
||||
|
||||
1. An issue is proposing a new release with a changelog since the last release
|
||||
2. At least one of the [OWNERS](OWNERS) must LGTM this release
|
||||
2. At least on of the [OWNERS](OWNERS) must LGTM this release
|
||||
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
||||
4. The release issue is closed
|
||||
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
|
||||
|
||||
## Major/minor releases, merge freezes and milestones
|
||||
|
||||
* Kubespray does not maintain stable branches for releases. Releases are tags, not
|
||||
* Kargo does not maintain stable branches for releases. Releases are tags, not
|
||||
branches, and there are no backports. Therefore, there is no need for merge
|
||||
freezes as well.
|
||||
|
||||
@@ -20,21 +20,24 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
|
||||
support lifetime, which ends once the milestone closed. Then only a next major
|
||||
or minor release can be done.
|
||||
|
||||
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
|
||||
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
|
||||
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
||||
Older or newer versions are not supported and not tested for the given release.
|
||||
|
||||
* There is no unstable releases and no APIs, thus Kubespray doesn't follow
|
||||
* There is no unstable releases and no APIs, thus Kargo doesn't follow
|
||||
[semver](http://semver.org/). Every version describes only a stable release.
|
||||
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
|
||||
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
||||
the contributed addons or bound versions of Kubernetes and other components, are
|
||||
considered out of Kubespray scope and are up to the components' teams to deal with and
|
||||
considered out of Kargo scope and are up to the components' teams to deal with and
|
||||
document.
|
||||
|
||||
* Minor releases can change components' versions, but not the major ``kube_version``.
|
||||
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
|
||||
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
|
||||
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
||||
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
||||
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
||||
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
||||
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
||||
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
||||
foo
|
||||
foo
|
||||
foo
|
||||
|
||||
66
Vagrantfile
vendored
66
Vagrantfile
vendored
@@ -3,38 +3,20 @@
|
||||
|
||||
require 'fileutils'
|
||||
|
||||
Vagrant.require_version ">= 1.9.0"
|
||||
Vagrant.require_version ">= 1.8.0"
|
||||
|
||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||
|
||||
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||
|
||||
SUPPORTED_OS = {
|
||||
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
|
||||
"centos" => {box: "bento/centos-7.3", bootstrap_os: "centos", user: "vagrant"},
|
||||
}
|
||||
|
||||
# Defaults for config options defined in CONFIG
|
||||
$num_instances = 3
|
||||
$instance_name_prefix = "k8s"
|
||||
$vm_gui = false
|
||||
$vm_memory = 2048
|
||||
$vm_memory = 1536
|
||||
$vm_cpus = 1
|
||||
$shared_folders = {}
|
||||
$forwarded_ports = {}
|
||||
$subnet = "172.17.8"
|
||||
$os = "ubuntu"
|
||||
$network_plugin = "flannel"
|
||||
# The first three nodes are etcd servers
|
||||
$etcd_instances = $num_instances
|
||||
# The first two nodes are kube masters
|
||||
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||
# All nodes are kube nodes
|
||||
$kube_node_instances = $num_instances
|
||||
$local_release_dir = "/vagrant/temp"
|
||||
$box = "bento/ubuntu-16.04"
|
||||
|
||||
host_vars = {}
|
||||
|
||||
@@ -42,7 +24,6 @@ if File.exist?(CONFIG)
|
||||
require CONFIG
|
||||
end
|
||||
|
||||
$box = SUPPORTED_OS[$os][:box]
|
||||
# if $inventory is not set, try to use example
|
||||
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
||||
|
||||
@@ -68,10 +49,7 @@ Vagrant.configure("2") do |config|
|
||||
# always use Vagrants insecure key
|
||||
config.ssh.insert_key = false
|
||||
config.vm.box = $box
|
||||
if SUPPORTED_OS[$os].has_key? :box_url
|
||||
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
|
||||
end
|
||||
config.ssh.username = SUPPORTED_OS[$os][:user]
|
||||
|
||||
# plugin conflict
|
||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||
config.vbguest.auto_update = false
|
||||
@@ -102,10 +80,6 @@ Vagrant.configure("2") do |config|
|
||||
end
|
||||
end
|
||||
|
||||
$shared_folders.each do |src, dst|
|
||||
config.vm.synced_folder src, dst
|
||||
end
|
||||
|
||||
config.vm.provider :virtualbox do |vb|
|
||||
vb.gui = $vm_gui
|
||||
vb.memory = $vm_memory
|
||||
@@ -114,23 +88,14 @@ Vagrant.configure("2") do |config|
|
||||
|
||||
ip = "#{$subnet}.#{i+100}"
|
||||
host_vars[vm_name] = {
|
||||
"ip": ip,
|
||||
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
|
||||
"local_release_dir" => $local_release_dir,
|
||||
"download_run_once": "False",
|
||||
"kube_network_plugin": $network_plugin
|
||||
"ip" => ip,
|
||||
#"access_ip" => ip,
|
||||
"flannel_interface" => ip,
|
||||
"flannel_backend_type" => "host-gw",
|
||||
"local_release_dir" => "/vagrant/temp",
|
||||
"download_run_once" => "False"
|
||||
}
|
||||
|
||||
config.vm.network :private_network, ip: ip
|
||||
|
||||
# workaround for Vagrant 1.9.1 and centos vm
|
||||
# https://github.com/hashicorp/vagrant/issues/8096
|
||||
if Vagrant::VERSION == "1.9.1" && $os == "centos"
|
||||
config.vm.provision "shell", inline: "service network restart", run: "always"
|
||||
end
|
||||
|
||||
# Disable swap for each vm
|
||||
config.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
# Only execute once the Ansible provisioner,
|
||||
# when all the machines are up and ready.
|
||||
@@ -143,13 +108,16 @@ Vagrant.configure("2") do |config|
|
||||
ansible.sudo = true
|
||||
ansible.limit = "all"
|
||||
ansible.host_key_checking = false
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}"]
|
||||
ansible.host_vars = host_vars
|
||||
#ansible.tags = ['download']
|
||||
ansible.groups = {
|
||||
"etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
|
||||
"kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
|
||||
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
|
||||
# The first three nodes should be etcd servers
|
||||
"etcd" => ["#{$instance_name_prefix}-0[1:3]"],
|
||||
# The first two nodes should be masters
|
||||
"kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
|
||||
# all nodes should be kube nodes
|
||||
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
|
||||
"k8s-cluster:children" => ["kube-master", "kube-node"],
|
||||
}
|
||||
end
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[ssh_connection]
|
||||
pipelining=True
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
host_key_checking=False
|
||||
@@ -9,6 +9,3 @@ fact_caching = jsonfile
|
||||
fact_caching_connection = /tmp
|
||||
stdout_callback = skippy
|
||||
library = ./library
|
||||
callback_whitelist = profile_tasks
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
|
||||
89
cluster.yml
89
cluster.yml
@@ -2,115 +2,64 @@
|
||||
- hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||
- bastion-ssh-config
|
||||
tags: [localhost, bastion]
|
||||
|
||||
- hosts: k8s-cluster:etcd:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
gather_facts: false
|
||||
vars:
|
||||
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
||||
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
- bootstrap-os
|
||||
tags:
|
||||
- bootstrap-os
|
||||
|
||||
- hosts: k8s-cluster:etcd:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
gather_facts: true
|
||||
|
||||
- hosts: k8s-cluster:etcd:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: docker, tags: docker }
|
||||
- role: rkt
|
||||
tags: rkt
|
||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||
- { role: download, tags: download, skip_downloads: false }
|
||||
environment: "{{proxy_env}}"
|
||||
- { role: rkt, tags: rkt, when: "'rkt' in [ etcd_deployment_type, kubelet_deployment_type ]" }
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
- hosts: etcd:!k8s-cluster
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: etcd
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||
|
||||
- hosts: k8s-cluster:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||
environment: "{{proxy_env}}"
|
||||
- { role: etcd, tags: etcd }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd }
|
||||
- { role: kubernetes/node, tags: node }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
||||
- { role: network_plugin, tags: network }
|
||||
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||
|
||||
- hosts: calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: network_plugin/calico/rr, tags: network }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
any_errors_fatal: true
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps, tags: apps }
|
||||
|
||||
@@ -1,3 +1,59 @@
# Kubernetes Community Code of Conduct
## Kubernetes Community Code of Conduct

Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses,
  without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/

### Kubernetes Events Code of Conduct

Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.

While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.

The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.

Please bring any concerns to the immediate attention of Kubernetes event staff.

@@ -1,27 +0,0 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["elasticloadbalancing:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["route53:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kubernetes-*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kubernetes-*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:Describe*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:AttachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:DetachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["route53:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import boto3
|
||||
import os
|
||||
import argparse
|
||||
import json
|
||||
|
||||
class SearchEC2Tags(object):
|
||||
|
||||
def __init__(self):
|
||||
self.parse_args()
|
||||
if self.args.list:
|
||||
self.search_tags()
|
||||
if self.args.host:
|
||||
data = {}
|
||||
print json.dumps(data, indent=2)
|
||||
|
||||
def parse_args(self):
|
||||
|
||||
##Check if VPC_VISIBILITY is set, if not default to private
|
||||
if "VPC_VISIBILITY" in os.environ:
|
||||
self.vpc_visibility = os.environ['VPC_VISIBILITY']
|
||||
else:
|
||||
self.vpc_visibility = "private"
|
||||
|
||||
##Support --list and --host flags. We largely ignore the host one.
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--list', action='store_true', default=False, help='List instances')
|
||||
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
|
||||
self.args = parser.parse_args()
|
||||
|
||||
def search_tags(self):
|
||||
hosts = {}
|
||||
hosts['_meta'] = { 'hostvars': {} }
|
||||
|
||||
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
|
||||
for group in ["kube-master", "kube-node", "etcd"]:
|
||||
hosts[group] = []
|
||||
tag_key = "kubespray-role"
|
||||
tag_value = ["*"+group+"*"]
|
||||
region = os.environ['REGION']
|
||||
|
||||
ec2 = boto3.resource('ec2', region)
|
||||
|
||||
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
|
||||
for instance in instances:
|
||||
if self.vpc_visibility == "public":
|
||||
hosts[group].append(instance.public_dns_name)
|
||||
hosts['_meta']['hostvars'][instance.public_dns_name] = {
|
||||
'ansible_ssh_host': instance.public_ip_address
|
||||
}
|
||||
else:
|
||||
hosts[group].append(instance.private_dns_name)
|
||||
hosts['_meta']['hostvars'][instance.private_dns_name] = {
|
||||
'ansible_ssh_host': instance.private_ip_address
|
||||
}
|
||||
|
||||
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||
print json.dumps(hosts, sort_keys=True, indent=2)
|
||||
|
||||
SearchEC2Tags()
|
||||
@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
## Status

This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course).
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).

## Requirements

@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!


## Generating an inventory for kubespray
## Generating an inventory for kargo

After you have applied the templates, you can generate an inventory with this call:

@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory which can then be used with kubespray, e.g.:
It will create the file ./inventory which can then be used with kargo, e.g.:

```shell
$ cd kubespray-root-dir
$ cd kargo-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
```

@@ -9,18 +9,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if az &>/dev/null; then
|
||||
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||
elif azure &>/dev/null; then
|
||||
ansible-playbook generate-templates.yml
|
||||
|
||||
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||
else
|
||||
echo "Azure cli not found"
|
||||
fi
|
||||
ansible-playbook generate-templates.yml
|
||||
|
||||
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
AZURE_RESOURCE_GROUP="$1"
|
||||
|
||||
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||
echo "AZURE_RESOURCE_GROUP is missing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ansible-playbook generate-templates.yml
|
||||
|
||||
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||
@@ -9,10 +9,6 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if az &>/dev/null; then
|
||||
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||
else
|
||||
ansible-playbook generate-templates.yml
|
||||
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||
fi
|
||||
ansible-playbook generate-templates.yml
|
||||
|
||||
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||
@@ -1,14 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
AZURE_RESOURCE_GROUP="$1"
|
||||
|
||||
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||
echo "AZURE_RESOURCE_GROUP is missing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ansible-playbook generate-templates.yml
|
||||
|
||||
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
|
||||
@@ -8,11 +8,5 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||
echo "AZURE_RESOURCE_GROUP is missing"
|
||||
exit 1
|
||||
fi
|
||||
# check if azure cli 2.0 exists else use azure cli 1.0
|
||||
if az &>/dev/null; then
|
||||
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||
elif azure &>/dev/null; then
|
||||
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||
else
|
||||
echo "Azure cli not found"
|
||||
fi
|
||||
|
||||
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- generate-inventory_2
|
||||
@@ -1,6 +1,5 @@
|
||||
|
||||
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
|
||||
# this name must be globally unique - it will be used as a prefix for azure components
|
||||
# Due to some Azure limitations, this name must be globally unique
|
||||
cluster_name: example
|
||||
|
||||
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
||||
@@ -18,29 +17,10 @@ minions_os_disk_size: 1000
|
||||
|
||||
admin_username: devops
|
||||
admin_password: changeme
|
||||
|
||||
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
||||
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||
|
||||
# Disable using ssh using password. Change it to false to allow to connect to ssh by password
|
||||
disablePasswordAuthentication: true
|
||||
|
||||
# Azure CIDRs
|
||||
azure_vnet_cidr: 10.0.0.0/8
|
||||
azure_admin_cidr: 10.241.2.0/24
|
||||
azure_masters_cidr: 10.0.4.0/24
|
||||
azure_minions_cidr: 10.240.0.0/16
|
||||
|
||||
# Azure loadbalancer port to use to access your cluster
|
||||
kube_apiserver_port: 6443
|
||||
|
||||
# Azure Netwoking and storage naming to use with inventory/all.yml
|
||||
#azure_virtual_network_name: KubeVNET
|
||||
#azure_subnet_admin_name: ad-subnet
|
||||
#azure_subnet_masters_name: master-subnet
|
||||
#azure_subnet_minions_name: minion-subnet
|
||||
#azure_route_table_name: routetable
|
||||
#azure_security_group_name: secgroup
|
||||
|
||||
# Storage types available are: "Standard_LRS","Premium_LRS"
|
||||
#azure_storage_account_type: Standard_LRS
|
||||
|
||||
@@ -8,4 +8,4 @@
|
||||
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||
|
||||
- name: Generate inventory
|
||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Query Azure VMs IPs
|
||||
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||
register: vm_ip_list_cmd
|
||||
|
||||
- name: Query Azure VMs Roles
|
||||
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||
register: vm_list_cmd
|
||||
|
||||
- set_fact:
|
||||
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
||||
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
||||
|
||||
- name: Generate inventory
|
||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||
@@ -1,34 +0,0 @@
|
||||
|
||||
{% for vm in vm_ip_list %}
|
||||
{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
|
||||
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||
{% else %}
|
||||
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube-master]
|
||||
{% for vm in vm_roles_list %}
|
||||
{% if 'kube-master' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[etcd]
|
||||
{% for vm in vm_roles_list %}
|
||||
{% if 'etcd' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[kube-node]
|
||||
{% for vm in vm_roles_list %}
|
||||
{% if 'kube-node' in vm.tags.roles %}
|
||||
{{ vm.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
apiVersion: "2015-06-15"
|
||||
|
||||
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||
virtualNetworkName: "KubVNET"
|
||||
|
||||
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
|
||||
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
|
||||
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
|
||||
subnetAdminName: "ad-subnet"
|
||||
subnetMastersName: "master-subnet"
|
||||
subnetMinionsName: "minion-subnet"
|
||||
|
||||
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
|
||||
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
|
||||
routeTableName: "routetable"
|
||||
securityGroupName: "secgroup"
|
||||
|
||||
nameSuffix: "{{ cluster_name }}"
|
||||
nameSuffix: "{{cluster_name}}"
|
||||
|
||||
availabilitySetMasters: "master-avs"
|
||||
availabilitySetMinions: "minion-avs"
|
||||
@@ -33,5 +33,5 @@ imageReference:
|
||||
imageReferenceJson: "{{imageReference|to_json}}"
|
||||
|
||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||
storageAccountType: "Standard_LRS"
|
||||
|
||||
|
||||
@@ -62,8 +62,8 @@
|
||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||
},
|
||||
"protocol": "tcp",
|
||||
"frontendPort": "{{kube_apiserver_port}}",
|
||||
"backendPort": "{{kube_apiserver_port}}",
|
||||
"frontendPort": 443,
|
||||
"backendPort": 443,
|
||||
"enableFloatingIP": false,
|
||||
"idleTimeoutInMinutes": 5,
|
||||
"probe": {
|
||||
@@ -77,7 +77,7 @@
|
||||
"name": "kube-api",
|
||||
"properties": {
|
||||
"protocol": "tcp",
|
||||
"port": "{{kube_apiserver_port}}",
|
||||
"port": 443,
|
||||
"intervalInSeconds": 5,
|
||||
"numberOfProbes": 2
|
||||
}
|
||||
@@ -193,4 +193,4 @@
|
||||
} {% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -92,7 +92,7 @@
|
||||
"description": "Allow secure kube-api",
|
||||
"protocol": "Tcp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "{{kube_apiserver_port}}",
|
||||
"destinationPortRange": "443",
|
||||
"sourceAddressPrefix": "Internet",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
@@ -106,4 +106,4 @@
|
||||
"dependsOn": []
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -41,7 +41,7 @@ import re
import sys

ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
         'calico-rr', 'vault']
         'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
# Configurable as shell vars end


class KubesprayInventory(object):
class KargoInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config = configparser.ConfigParser(allow_no_value=True,
@@ -250,7 +250,6 @@ class KubesprayInventory(object):
    def set_etcd(self, hosts):
        for host in hosts:
            self.add_host_to_group('etcd', host)
            self.add_host_to_group('vault', host)

    def load_file(self, files=None):
        '''Directly loads JSON, or YAML file to inventory.'''
@@ -338,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
def main(argv=None):
    if not argv:
        argv = sys.argv[1:]
    KubesprayInventory(argv, CONFIG_FILE)
    KargoInventory(argv, CONFIG_FILE)

if __name__ == "__main__":
    sys.exit(main())

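For context, the builder shown above is meant to be driven from the shell with a list of node IPs, writing the resulting inventory to the file named by `CONFIG_FILE`. A minimal invocation might look roughly like the sketch below; the script path and the config location are assumptions for illustration, not taken from this diff:

```
# illustrative only – adjust paths to where the inventory builder lives in your checkout
CONFIG_FILE=inventory/inventory.cfg \
  python contrib/inventory_builder/inventory.py 10.90.3.2 10.90.3.3 10.90.3.4
```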
@@ -1,3 +1,3 @@
[metadata]
name = kubespray-inventory-builder
name = kargo-inventory-builder
version = 0.1

@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
        sys_mock.exit = mock.Mock()
        super(TestInventory, self).setUp()
        self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
        self.inv = inventory.KubesprayInventory()
        self.inv = inventory.KargoInventory()

    def test_get_ip_from_opts(self):
        optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"

@@ -1,11 +0,0 @@
# Kubespray on KVM Virtual Machines hypervisor preparation

A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kubespray itself.

### User creation

If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
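Purely for illustration (the playbook file name is not shown in this diff and is assumed here), the role would typically be driven with those two variables set on the command line, using the default values from the commented group_vars example in the next hunk:

```
# assumes the deleted playbook was saved as kvm-setup.yml next to the kvm-setup role
ansible-playbook kvm-setup.yml -b \
  -e k8s_deployment_user=kubespray \
  -e k8s_deployment_user_pkey_path=/tmp/ssh_rsa
```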
@@ -1,3 +0,0 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

@@ -1,8 +0,0 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
gather_facts: False
|
||||
become: yes
|
||||
vars:
|
||||
- bootstrap_os: none
|
||||
roles:
|
||||
- kvm-setup
|
||||
@@ -1,46 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Upgrade all packages to the latest version (yum)
|
||||
yum:
|
||||
name: '*'
|
||||
state: latest
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
- name: Install required packages
|
||||
yum:
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
with_items:
|
||||
- bind-utils
|
||||
- ntp
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
- name: Install required packages
|
||||
apt:
|
||||
upgrade: yes
|
||||
update_cache: yes
|
||||
cache_valid_time: 3600
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
install_recommends: no
|
||||
with_items:
|
||||
- dnsutils
|
||||
- ntp
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: Upgrade all packages to the latest version (apt)
|
||||
shell: apt-get -o \
|
||||
Dpkg::Options::=--force-confdef -o \
|
||||
Dpkg::Options::=--force-confold -q -y \
|
||||
dist-upgrade
|
||||
environment:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
|
||||
# Create deployment user if required
|
||||
- include: user.yml
|
||||
when: k8s_deployment_user is defined
|
||||
|
||||
# Set proper sysctl values
|
||||
- include: sysctl.yml
|
||||
@@ -1,46 +0,0 @@
|
||||
---
|
||||
- name: Load br_netfilter module
|
||||
modprobe:
|
||||
name: br_netfilter
|
||||
state: present
|
||||
register: br_netfilter
|
||||
|
||||
- name: Add br_netfilter into /etc/modules
|
||||
lineinfile:
|
||||
dest: /etc/modules
|
||||
state: present
|
||||
line: 'br_netfilter'
|
||||
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
||||
|
||||
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
|
||||
copy:
|
||||
dest: /etc/modules-load.d/kubespray.conf
|
||||
content: |-
|
||||
### This file is managed by Ansible
|
||||
br-netfilter
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
when: br_netfilter is defined
|
||||
|
||||
|
||||
- name: Enable net.ipv4.ip_forward in sysctl
|
||||
sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: 1
|
||||
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
|
||||
state: present
|
||||
reload: yes
|
||||
|
||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||
sysctl:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
value: 0
|
||||
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
|
||||
reload: yes
|
||||
with_items:
|
||||
- net.bridge.bridge-nf-call-arptables
|
||||
- net.bridge.bridge-nf-call-ip6tables
|
||||
- net.bridge.bridge-nf-call-iptables
|
||||
when: br_netfilter is defined
|
||||
@@ -1,46 +0,0 @@
|
||||
---
|
||||
- name: Create user {{ k8s_deployment_user }}
|
||||
user:
|
||||
name: "{{ k8s_deployment_user }}"
|
||||
groups: adm
|
||||
shell: /bin/bash
|
||||
|
||||
- name: Ensure that .ssh exists
|
||||
file:
|
||||
path: "/home/{{ k8s_deployment_user }}/.ssh"
|
||||
state: directory
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
|
||||
- name: Configure sudo for deployment user
|
||||
copy:
|
||||
content: |
|
||||
%{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
|
||||
dest: "/etc/sudoers.d/55-k8s-deployment"
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
- name: Write private SSH key
|
||||
copy:
|
||||
src: "{{ k8s_deployment_user_pkey_path }}"
|
||||
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
|
||||
mode: 0400
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
|
||||
- name: Write public SSH key
|
||||
shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
|
||||
> /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
args:
|
||||
creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
|
||||
- name: Fix ssh-pub-key permissions
|
||||
file:
|
||||
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
mode: 0600
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
@@ -1,4 +1,4 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
# Deploying a Kargo Kubernetes Cluster with GlusterFS

You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.

@@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f

In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.

Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):

```
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us

## Using Terraform and Ansible

First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```
cluster_name = "cluster1"
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
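As a concrete, purely illustrative sketch of that check, the snippet below ensures a bootstrap variable is present in that file; the variable name `bootstrap_os` is borrowed from elsewhere in this repository, not from this README, and the value should match your chosen base image:

```
# illustrative only – adjust the value to your base image before running
grep -q '^bootstrap_os:' contrib/terraform/openstack/group_vars/all.yml || \
  echo 'bootstrap_os: coreos' >> contrib/terraform/openstack/group_vars/all.yml
```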

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:

```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
If you need to destroy the cluster, you can run:

```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```

@@ -1,17 +1,8 @@
|
||||
---
|
||||
- hosts: gfs-cluster
|
||||
gather_facts: false
|
||||
vars:
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: gfs-cluster
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
@@ -21,5 +12,6 @@
|
||||
|
||||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv/lib }
|
||||
- { role: kubernetes-pv }
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
../../../inventory/group_vars
|
||||
@@ -1 +0,0 @@
|
||||
../../../../roles/bootstrap-os
|
||||
@@ -4,7 +4,6 @@
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{"port": 1}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
%global srcname kubespray
|
||||
|
||||
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||
|
||||
Name: kubespray
|
||||
Version: master
|
||||
Release: %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
|
||||
Summary: Ansible modules for installing Kubernetes
|
||||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
BuildRequires: python2
|
||||
BuildRequires: python2-devel
|
||||
BuildRequires: python2-setuptools
|
||||
BuildRequires: python-d2to1
|
||||
BuildRequires: python2-pbr
|
||||
|
||||
Requires: ansible
|
||||
Requires: python-jinja2 >= 2.10
|
||||
Requires: python-netaddr
|
||||
|
||||
%description
|
||||
|
||||
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||
installing a Kubernetes cluster. If you have questions, join us
|
||||
on the https://slack.k8s.io, channel '#kubespray'.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{upstream_version} -S git
|
||||
|
||||
|
||||
%build
|
||||
export PBR_VERSION=%{release}
|
||||
%{__python2} setup.py build bdist_rpm
|
||||
|
||||
|
||||
%install
|
||||
export PBR_VERSION=%{release}
|
||||
export SKIP_PIP_INSTALL=1
|
||||
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm
|
||||
|
||||
|
||||
%files
|
||||
%doc %{_docdir}/%{name}/README.md
|
||||
%doc %{_docdir}/%{name}/inventory/inventory.example
|
||||
%config %{_sysconfdir}/%{name}/ansible.cfg
|
||||
%config %{_sysconfdir}/%{name}/inventory/group_vars/all.yml
|
||||
%config %{_sysconfdir}/%{name}/inventory/group_vars/k8s-cluster.yml
|
||||
%license %{_docdir}/%{name}/LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
|
||||
%{_datarootdir}/%{name}/roles/
|
||||
%{_datarootdir}/%{name}/playbooks/
|
||||
%defattr(-,root,root)
|
||||
|
||||
|
||||
%changelog
|
||||
2
contrib/terraform/aws/.gitignore
vendored
2
contrib/terraform/aws/.gitignore
vendored
@@ -1,2 +1,2 @@
|
||||
*.tfstate*
|
||||
.terraform
|
||||
inventory
|
||||
|
||||
261
contrib/terraform/aws/00-create-infrastructure.tf
Executable file
261
contrib/terraform/aws/00-create-infrastructure.tf
Executable file
@@ -0,0 +1,261 @@
|
||||
variable "deploymentName" {
|
||||
type = "string"
|
||||
description = "The desired name of your deployment."
|
||||
}
|
||||
|
||||
variable "numControllers"{
|
||||
type = "string"
|
||||
description = "Desired # of controllers."
|
||||
}
|
||||
|
||||
variable "numEtcd" {
|
||||
type = "string"
|
||||
description = "Desired # of etcd nodes. Should be an odd number."
|
||||
}
|
||||
|
||||
variable "numNodes" {
|
||||
type = "string"
|
||||
description = "Desired # of nodes."
|
||||
}
|
||||
|
||||
variable "volSizeController" {
|
||||
type = "string"
|
||||
description = "Volume size for the controllers (GB)."
|
||||
}
|
||||
|
||||
variable "volSizeEtcd" {
|
||||
type = "string"
|
||||
description = "Volume size for etcd (GB)."
|
||||
}
|
||||
|
||||
variable "volSizeNodes" {
|
||||
type = "string"
|
||||
description = "Volume size for nodes (GB)."
|
||||
}
|
||||
|
||||
variable "subnet" {
|
||||
type = "string"
|
||||
description = "The subnet in which to put your cluster."
|
||||
}
|
||||
|
||||
variable "securityGroups" {
|
||||
type = "string"
|
||||
description = "The sec. groups in which to put your cluster."
|
||||
}
|
||||
|
||||
variable "ami"{
|
||||
type = "string"
|
||||
description = "AMI to use for all VMs in cluster."
|
||||
}
|
||||
|
||||
variable "SSHKey" {
|
||||
type = "string"
|
||||
description = "SSH key to use for VMs."
|
||||
}
|
||||
|
||||
variable "master_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for masters."
|
||||
}
|
||||
|
||||
variable "etcd_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for etcd."
|
||||
}
|
||||
|
||||
variable "node_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for nodes."
|
||||
}
|
||||
|
||||
variable "terminate_protect" {
|
||||
type = "string"
|
||||
default = "false"
|
||||
}
|
||||
|
||||
variable "awsRegion" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
region = "${var.awsRegion}"
|
||||
}
|
||||
|
||||
variable "iam_prefix" {
|
||||
type = "string"
|
||||
description = "Prefix name for IAM profiles"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kubernetes_master_profile" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_profile"
|
||||
roles = ["${aws_iam_role.kubernetes_master_role.name}"]
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "kubernetes_master_role" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_role"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "kubernetes_master_policy" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_policy"
|
||||
role = "${aws_iam_role.kubernetes_master_role.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["elasticloadbalancing:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kubernetes_node_profile" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_profile"
|
||||
roles = ["${aws_iam_role.kubernetes_node_role.name}"]
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "kubernetes_node_role" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_role"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "kubernetes_node_policy" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_policy"
|
||||
role = "${aws_iam_role.kubernetes_node_role.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:Describe*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:AttachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:DetachVolume",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_instance" "master" {
|
||||
count = "${var.numControllers}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.master_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeController}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-master-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_instance" "etcd" {
|
||||
count = "${var.numEtcd}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.etcd_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeEtcd}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-etcd-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
resource "aws_instance" "minion" {
|
||||
count = "${var.numNodes}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.node_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeNodes}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-minion-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
output "kubernetes_master_profile" {
|
||||
value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
|
||||
}
|
||||
|
||||
output "kubernetes_node_profile" {
|
||||
value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
|
||||
}
|
||||
|
||||
output "master-ip" {
|
||||
value = "${join(", ", aws_instance.master.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "etcd-ip" {
|
||||
value = "${join(", ", aws_instance.etcd.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "minion-ip" {
|
||||
value = "${join(", ", aws_instance.minion.*.private_ip)}"
|
||||
}
|
||||
|
||||
|
||||
37
contrib/terraform/aws/01-create-inventory.tf
Executable file
37
contrib/terraform/aws/01-create-inventory.tf
Executable file
@@ -0,0 +1,37 @@
|
||||
variable "SSHUser" {
|
||||
type = "string"
|
||||
description = "SSH User for VMs."
|
||||
}
|
||||
|
||||
resource "null_resource" "ansible-provision" {
|
||||
|
||||
depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]
|
||||
|
||||
##Create Master Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"[kube-master]\" > inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
##Create ETCD Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[etcd]\" >> inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
##Create Nodes Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[kube-node]\" >> inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
|
||||
}
|
||||
}
|
||||
@@ -2,124 +2,27 @@

**Overview:**

This project will create:
* VPC with Public and Private Subnets in # Availability Zones
* Bastion Hosts and NAT Gateways in the Public Subnet
* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
  * evenly distributed over the # of Availability Zones
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
- This will create nodes in a VPC inside of AWS

**Requirements**
- Terraform 0.8.7 or newer
- A dynamic number of masters, etcd, and nodes can be created

- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. Will be looking into how to work around this later.

**How to Use:**

- Export the variables for your AWS credentials or edit `credentials.tfvars`:
- Export the variables for your Amazon credentials:

```
export AWS_ACCESS_KEY_ID="www"
export AWS_SECRET_ACCESS_KEY="xxx"
export AWS_SSH_KEY_NAME="yyy"
export AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`

- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as the base image. If you want to change this behaviour, see the note "Using a distribution other than CoreOS" below.
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending on whether you exported your AWS credentials

Example:
```commandline
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
```

- Terraform automatically creates an Ansible inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Update contrib/terraform/aws/terraform.tfvars with your data

- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using a bastion host, use the generated ssh-bastion.conf.
  Ansible automatically detects the bastion and changes ssh_args
```commandline
ssh -F ./ssh-bastion.conf user@$ip
```
- Run with `terraform apply`

- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.

Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
***Using a distribution other than CoreOS***
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
**Future Work:**

For example, to use:
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with

      data "aws_ami" "distro" {
        most_recent = true

        filter {
          name   = "name"
          values = ["debian-jessie-amd64-hvm-*"]
        }

        filter {
          name   = "virtualization-type"
          values = ["hvm"]
        }

        owners = ["379101102735"]
      }

- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with

      data "aws_ami" "distro" {
        most_recent = true

        filter {
          name   = "name"
          values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
        }

        filter {
          name   = "virtualization-type"
          values = ["hvm"]
        }

        owners = ["099720109477"]
      }

- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with

      data "aws_ami" "distro" {
        most_recent = true

        filter {
          name   = "name"
          values = ["dcos-centos7-*"]
        }

        filter {
          name   = "virtualization-type"
          values = ["hvm"]
        }

        owners = ["688023202711"]
      }

**Troubleshooting**

***Remaining AWS IAM Instance Profile***:

If the cluster was destroyed without using Terraform, it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command:
```
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```

***Ansible inventory doesn't get created:***

It could happen that Terraform doesn't create an Ansible inventory file automatically. If this is the case, copy the output after `inventory=` and create a file named `hosts` in the directory `inventory`, then paste the inventory into the file.
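If the file does have to be written by hand, a minimal sketch of that manual step (file and directory names as described in the paragraph above) could be:

```
mkdir -p inventory
cat > inventory/hosts <<'EOF'
# paste here the text that Terraform printed after `inventory=`
EOF
```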

**Architecture**

Pictured is an AWS infrastructure created with this Terraform project distributed over two Availability Zones.



- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now; using terraform.py or something may make sense in the future.

@@ -1,191 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.8.7"
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
access_key = "${var.AWS_ACCESS_KEY_ID}"
|
||||
secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
|
||||
region = "${var.AWS_DEFAULT_REGION}"
|
||||
}
|
||||
|
||||
data "aws_availability_zones" "available" {}
|
||||
|
||||
/*
|
||||
* Calling modules who create the initial AWS VPC / AWS ELB
|
||||
* and AWS IAM Roles for Kubernetes Deployment
|
||||
*/
|
||||
|
||||
module "aws-vpc" {
|
||||
source = "modules/vpc"
|
||||
|
||||
aws_cluster_name = "${var.aws_cluster_name}"
|
||||
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
||||
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
||||
default_tags="${var.default_tags}"
|
||||
|
||||
}
|
||||
|
||||
|
||||
module "aws-elb" {
|
||||
source = "modules/elb"
|
||||
|
||||
aws_cluster_name="${var.aws_cluster_name}"
|
||||
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
||||
aws_elb_api_port = "${var.aws_elb_api_port}"
|
||||
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
||||
default_tags="${var.default_tags}"
|
||||
|
||||
}
|
||||
|
||||
module "aws-iam" {
|
||||
source = "modules/iam"
|
||||
|
||||
aws_cluster_name="${var.aws_cluster_name}"
|
||||
}
|
||||
|
||||
/*
|
||||
* Create Bastion Instances in AWS
|
||||
*
|
||||
*/
|
||||
|
||||
resource "aws_instance" "bastion-server" {
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_bastion_size}"
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
associate_public_ip_address = true
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
|
||||
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Create K8s Master and worker nodes and etcd instances
|
||||
*
|
||||
*/
|
||||
|
||||
resource "aws_instance" "k8s-master" {
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_master_size}"
|
||||
|
||||
count = "${var.aws_kube_master_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
|
||||
|
||||
iam_instance_profile = "${module.aws-iam.kube-master-profile}"
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "master"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
count = "${var.aws_kube_master_num}"
|
||||
elb = "${module.aws-elb.aws_elb_api_id}"
|
||||
instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
|
||||
}
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-etcd" {
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_etcd_size}"
|
||||
|
||||
count = "${var.aws_etcd_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "etcd"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-worker" {
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_worker_size}"
|
||||
|
||||
count = "${var.aws_kube_worker_num}"
|
||||
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
|
||||
iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "worker"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Create Kubespray Inventory File
|
||||
*
|
||||
*/
|
||||
data "template_file" "inventory" {
|
||||
template = "${file("${path.module}/templates/inventory.tpl")}"
|
||||
|
||||
vars {
|
||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||
connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
||||
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
||||
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "null_resource" "inventories" {
|
||||
provisioner "local-exec" {
|
||||
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
||||
}
|
||||
|
||||
triggers {
|
||||
template = "${data.template_file.inventory.rendered}"
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
#AWS Access Key
|
||||
AWS_ACCESS_KEY_ID = ""
|
||||
#AWS Secret Key
|
||||
AWS_SECRET_ACCESS_KEY = ""
|
||||
#EC2 SSH Key Name
|
||||
AWS_SSH_KEY_NAME = ""
|
||||
#AWS Region
|
||||
AWS_DEFAULT_REGION = "eu-central-1"
|
||||
Binary file not shown (image, 114 KiB, removed).
@@ -1,58 +0,0 @@
|
||||
resource "aws_security_group" "aws-elb" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
vpc_id = "${var.aws_vpc_id}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
resource "aws_security_group_rule" "aws-allow-api-access" {
|
||||
type = "ingress"
|
||||
from_port = "${var.aws_elb_api_port}"
|
||||
to_port = "${var.k8s_secure_api_port}"
|
||||
protocol = "TCP"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = "${aws_security_group.aws-elb.id}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "aws-allow-api-egress" {
|
||||
type = "egress"
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
protocol = "TCP"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = "${aws_security_group.aws-elb.id}"
|
||||
}
|
||||
|
||||
# Create a new AWS ELB for K8S API
|
||||
resource "aws_elb" "aws-elb-api" {
|
||||
name = "kubernetes-elb-${var.aws_cluster_name}"
|
||||
subnets = ["${var.aws_subnet_ids_public}"]
|
||||
security_groups = ["${aws_security_group.aws-elb.id}"]
|
||||
|
||||
listener {
|
||||
instance_port = "${var.k8s_secure_api_port}"
|
||||
instance_protocol = "tcp"
|
||||
lb_port = "${var.aws_elb_api_port}"
|
||||
lb_protocol = "tcp"
|
||||
}
|
||||
|
||||
health_check {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
timeout = 3
|
||||
target = "TCP:${var.k8s_secure_api_port}"
|
||||
interval = 30
|
||||
}
|
||||
|
||||
cross_zone_load_balancing = true
|
||||
idle_timeout = 400
|
||||
connection_draining = true
|
||||
connection_draining_timeout = 400
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||
))}"
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
output "aws_elb_api_id" {
|
||||
value = "${aws_elb.aws-elb-api.id}"
|
||||
}
|
||||
|
||||
output "aws_elb_api_fqdn" {
|
||||
value = "${aws_elb.aws-elb-api.dns_name}"
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
variable "aws_cluster_name" {
|
||||
description = "Name of Cluster"
|
||||
}
|
||||
|
||||
variable "aws_vpc_id" {
|
||||
description = "AWS VPC ID"
|
||||
}
|
||||
|
||||
variable "aws_elb_api_port" {
|
||||
description = "Port for AWS ELB"
|
||||
}
|
||||
|
||||
variable "k8s_secure_api_port" {
|
||||
description = "Secure Port of K8S API Server"
|
||||
}
|
||||
|
||||
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "Availability Zones Used"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
|
||||
variable "aws_subnet_ids_public" {
|
||||
description = "IDs of Public Subnets"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
#Add AWS Roles for Kubernetes
|
||||
|
||||
resource "aws_iam_role" "kube-master" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "kube-worker" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-node"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
#Add AWS Policies for Kubernetes
|
||||
|
||||
resource "aws_iam_role_policy" "kube-master" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-master"
|
||||
role = "${aws_iam_role.kube-master.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["elasticloadbalancing:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["route53:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kubernetes-*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "kube-worker" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-node"
|
||||
role = "${aws_iam_role.kube-worker.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kubernetes-*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:Describe*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:AttachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:DetachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["route53:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
#Create AWS Instance Profiles
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-master" {
|
||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||
role = "${aws_iam_role.kube-master.name}"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-worker" {
|
||||
name = "kube_${var.aws_cluster_name}_node_profile"
|
||||
role = "${aws_iam_role.kube-worker.name}"
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
output "kube-master-profile" {
|
||||
value = "${aws_iam_instance_profile.kube-master.name }"
|
||||
}
|
||||
|
||||
output "kube-worker-profile" {
|
||||
value = "${aws_iam_instance_profile.kube-worker.name }"
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
variable "aws_cluster_name" {
|
||||
description = "Name of Cluster"
|
||||
}
|
||||
@@ -1,142 +0,0 @@
|
||||
|
||||
resource "aws_vpc" "cluster-vpc" {
|
||||
cidr_block = "${var.aws_vpc_cidr_block}"
|
||||
|
||||
#DNS Related Entries
|
||||
enable_dns_support = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
resource "aws_eip" "cluster-nat-eip" {
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
vpc = true
|
||||
}
|
||||
|
||||
|
||||
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
count="${length(var.aws_avail_zones)}"
|
||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
|
||||
|
||||
}
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-private" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
count="${length(var.aws_avail_zones)}"
|
||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
))}"
|
||||
}
|
||||
|
||||
#Routing in VPC
|
||||
|
||||
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
|
||||
|
||||
resource "aws_route_table" "kubernetes-public" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
route {
|
||||
cidr_block = "0.0.0.0/0"
|
||||
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_route_table" "kubernetes-private" {
|
||||
count = "${length(var.aws_cidr_subnets_private)}"
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
route {
|
||||
cidr_block = "0.0.0.0/0"
|
||||
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-public" {
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
|
||||
route_table_id = "${aws_route_table.kubernetes-public.id}"
|
||||
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-private" {
|
||||
count = "${length(var.aws_cidr_subnets_private)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
|
||||
route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
|
||||
|
||||
}
|
||||
|
||||
|
||||
#Kubernetes Security Groups
|
||||
|
||||
resource "aws_security_group" "kubernetes" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||
type = "ingress"
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
protocol = "-1"
|
||||
cidr_blocks= ["${var.aws_vpc_cidr_block}"]
|
||||
security_group_id = "${aws_security_group.kubernetes.id}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "allow-all-egress" {
|
||||
type = "egress"
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = "${aws_security_group.kubernetes.id}"
|
||||
}
|
||||
|
||||
|
||||
resource "aws_security_group_rule" "allow-ssh-connections" {
|
||||
type = "ingress"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "TCP"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
security_group_id = "${aws_security_group.kubernetes.id}"
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
output "aws_vpc_id" {
|
||||
value = "${aws_vpc.cluster-vpc.id}"
|
||||
}
|
||||
|
||||
output "aws_subnet_ids_private" {
|
||||
value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
|
||||
}
|
||||
|
||||
output "aws_subnet_ids_public" {
|
||||
value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
|
||||
}
|
||||
|
||||
output "aws_security_group" {
|
||||
value = ["${aws_security_group.kubernetes.*.id}"]
|
||||
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${var.default_tags}"
|
||||
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
variable "aws_vpc_cidr_block" {
|
||||
description = "CIDR Blocks for AWS VPC"
|
||||
}
|
||||
|
||||
|
||||
variable "aws_cluster_name" {
|
||||
description = "Name of Cluster"
|
||||
}
|
||||
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "AWS Availability Zones Used"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability zones"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_public" {
|
||||
description = "CIDR Blocks for public subnets in Availability zones"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
output "bastion_ip" {
|
||||
value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
|
||||
}
|
||||
|
||||
output "masters" {
|
||||
value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "workers" {
|
||||
value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "etcd" {
|
||||
value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
|
||||
}
|
||||
|
||||
|
||||
output "aws_elb_api_fqdn" {
|
||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||
}
|
||||
|
||||
output "inventory" {
|
||||
value = "${data.template_file.inventory.rendered}"
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${var.default_tags}"
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
[all]
|
||||
${connection_strings_master}
|
||||
${connection_strings_node}
|
||||
${connection_strings_etcd}
|
||||
|
||||
|
||||
${public_ip_address_bastion}
|
||||
|
||||
[kube-master]
|
||||
${list_master}
|
||||
|
||||
|
||||
[kube-node]
|
||||
${list_node}
|
||||
|
||||
|
||||
[etcd]
|
||||
${list_etcd}
|
||||
|
||||
|
||||
[k8s-cluster:children]
|
||||
kube-node
|
||||
kube-master
|
||||
|
||||
|
||||
[k8s-cluster:vars]
|
||||
${elb_api_fqdn}
|
||||
@@ -1,33 +1,22 @@
|
||||
#Global Vars
|
||||
aws_cluster_name = "devtest"
|
||||
deploymentName="test-kube-deploy"
|
||||
|
||||
#VPC Vars
|
||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||
numControllers="2"
|
||||
numEtcd="3"
|
||||
numNodes="2"
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_size = "t2.medium"
|
||||
volSizeController="20"
|
||||
volSizeEtcd="20"
|
||||
volSizeNodes="20"
|
||||
|
||||
awsRegion="us-west-2"
|
||||
subnet="subnet-xxxxx"
|
||||
ami="ami-32a85152"
|
||||
securityGroups="sg-xxxxx"
|
||||
SSHUser="core"
|
||||
SSHKey="my-key"
|
||||
|
||||
#Kubernetes Cluster
|
||||
master_instance_type="m3.xlarge"
|
||||
etcd_instance_type="m3.xlarge"
|
||||
node_instance_type="m3.xlarge"
|
||||
|
||||
aws_kube_master_num = 3
|
||||
aws_kube_master_size = "t2.medium"
|
||||
|
||||
aws_etcd_num = 3
|
||||
aws_etcd_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
#Settings AWS ELB
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
kube_insecure_apiserver_address = "0.0.0.0"
|
||||
|
||||
default_tags = {
|
||||
# Env = "devtest"
|
||||
# Product = "kubernetes"
|
||||
}
|
||||
terminate_protect="false"
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#Global Vars
|
||||
aws_cluster_name = "devtest"
|
||||
|
||||
#VPC Vars
|
||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_ami = "ami-5900cc36"
|
||||
aws_bastion_size = "t2.small"
|
||||
|
||||
|
||||
#Kubernetes Cluster
|
||||
|
||||
aws_kube_master_num = 3
|
||||
aws_kube_master_size = "t2.medium"
|
||||
|
||||
aws_etcd_num = 3
|
||||
aws_etcd_size = "t2.medium"
|
||||
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
aws_cluster_ami = "ami-903df7ff"
|
||||
|
||||
#Settings AWS ELB
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
kube_insecure_apiserver_address = "0.0.0.0"
|
||||
@@ -1,105 +0,0 @@
|
||||
variable "AWS_ACCESS_KEY_ID" {
|
||||
description = "AWS Access Key"
|
||||
}
|
||||
|
||||
variable "AWS_SECRET_ACCESS_KEY" {
|
||||
description = "AWS Secret Key"
|
||||
}
|
||||
|
||||
variable "AWS_SSH_KEY_NAME" {
|
||||
description = "Name of the SSH keypair to use in AWS."
|
||||
}
|
||||
|
||||
variable "AWS_DEFAULT_REGION" {
|
||||
description = "AWS Region"
|
||||
}
|
||||
|
||||
//General Cluster Settings
|
||||
|
||||
variable "aws_cluster_name" {
|
||||
description = "Name of AWS Cluster"
|
||||
}
|
||||
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["CoreOS-stable-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["595879546273"] #CoreOS
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
|
||||
variable "aws_vpc_cidr_block" {
|
||||
description = "CIDR Block for VPC"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability Zones"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_public" {
|
||||
description = "CIDR Blocks for public subnets in Availability Zones"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
//AWS EC2 Settings
|
||||
|
||||
variable "aws_bastion_size" {
|
||||
description = "EC2 Instance Size of Bastion Host"
|
||||
}
|
||||
|
||||
/*
|
||||
* AWS EC2 Settings
|
||||
 * The number should be divisible by the number of used
 * AWS Availability Zones without a remainder.
|
||||
*/
|
||||
variable "aws_kube_master_num" {
|
||||
description = "Number of Kubernetes Master Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_size" {
|
||||
description = "Instance size of Kube Master Nodes"
|
||||
}
|
||||
|
||||
variable "aws_etcd_num" {
|
||||
description = "Number of etcd Nodes"
|
||||
}
|
||||
|
||||
variable "aws_etcd_size" {
|
||||
description = "Instance size of etcd Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_num" {
|
||||
description = "Number of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_size" {
|
||||
description = "Instance size of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
/*
|
||||
* AWS ELB Settings
|
||||
*
|
||||
*/
|
||||
variable "aws_elb_api_port" {
|
||||
description = "Port for AWS ELB"
|
||||
}
|
||||
|
||||
variable "k8s_secure_api_port" {
|
||||
description = "Secure Port of K8S API Server"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../inventory/group_vars
|
||||
@@ -5,91 +5,63 @@ Openstack.
|
||||
|
||||
## Status
|
||||
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on an
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). It should work on most modern installs of OpenStack that support the basic
services.
|
||||
|
||||
## Approach
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||
There is a [python script](../terraform.py) that reads the generated `.tfstate`
|
||||
file to generate a dynamic inventory that is consumed by the main ansible script
|
||||
to actually install kubernetes and stand up the cluster.
|
||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
||||
|
||||
### Networking
|
||||
The configuration includes creating a private subnet with a router to the
|
||||
external net. It will allocate floating-ips from a pool and assign them to the
|
||||
hosts where that makes sense. You have the option of creating bastion hosts
|
||||
inside the private subnet to access the nodes there.
|
||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least one floating IP, which we would suggest is used on a master.
|
||||
* you already have a suitable OS image in glance
|
||||
* you already have both an internal network and a floating-ip pool created
|
||||
* you have security-groups enabled
|
||||
|
||||
### Kubernetes Nodes
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts. For each class there are options for allocating
|
||||
floating ip addresses or not.
|
||||
- Master Nodes with etcd
|
||||
- Master nodes without etcd
|
||||
- Standalone etcd hosts
|
||||
- Kubernetes worker nodes
|
||||
|
||||
Note that the ansible script will report an invalid configuration if you wind up
|
||||
with an even number of etcd instances since that is not a valid configuration.
|
||||
|
||||
### Gluster FS
|
||||
The terraform configuration supports provisioning of an optional GlusterFS
|
||||
shared file system based on a separate set of VMs. To enable this, you need to
|
||||
specify
|
||||
- the number of gluster hosts
|
||||
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||
- Other properties related to provisioning the hosts
|
||||
|
||||
Even if you are using Container Linux by CoreOS for your cluster, you will still
need the GlusterFS VMs to be based on either Debian or RedHat based images;
Container Linux by CoreOS cannot serve GlusterFS, but it can connect to it through
binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
## Requirements
|
||||
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||
- you already have a suitable OS image in glance
|
||||
- you already have a floating-ip pool created
|
||||
- you have security-groups enabled
|
||||
- you have a pair of keys generated that can be used to secure the new hosts
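If you do not already have a key pair, you can generate one first; a minimal sketch (the key path is just an example, use whatever file you plan to reference in `public_key_path`):

```bash
# Generate an RSA key pair for the new hosts (path is an example)
$ ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa_kubespray -N ""
```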
|
||||
|
||||
## Module Architecture
|
||||
The configuration is divided into three modules:
|
||||
- Network
|
||||
- IPs
|
||||
- Compute
|
||||
|
||||
The main reason for splitting the configuration up in this way is to easily
|
||||
accommodate situations where floating IPs are limited by a quota or if you have
|
||||
any external references to the floating IP (e.g. DNS) that would otherwise have
|
||||
to be updated.
|
||||
|
||||
You can force your existing IPs by modifying the compute variables in
|
||||
`kubespray.tf` as
|
||||
|
||||
```
|
||||
k8s_master_fips = ["151.101.129.67"]
|
||||
k8s_node_fips = ["151.101.129.68"]
|
||||
```
|
||||
|
||||
## Terraform
|
||||
Terraform will be used to provision all of the OpenStack resources. It is also
used to deploy and provision the software requirements.
|
||||
|
||||
### Prep
|
||||
|
||||
#### OpenStack
|
||||
|
||||
Ensure your OpenStack **Identity v2** credentials are loaded in environment
variables. This can be done by downloading a credentials .rc file from your
OpenStack dashboard and sourcing it:
|
||||
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
```
|
||||
|
||||
You will need two networks before installing: an internal network and
an external (floating IP pool) network. The internal network can be shared, as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs, Terraform does not attempt to create
these for you.
|
||||
|
||||
By default Terraform will expect that your networks are called `internal` and
|
||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
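For example, overriding them through environment variables might look like the sketch below (the network and pool names are placeholders for whatever exists in your tenancy):

```bash
# Terraform picks up any variable exported as TF_VAR_<name>
$ export TF_VAR_network_name=my-internal-net
$ export TF_VAR_floatingip_pool=my-external-net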
|
||||
|
||||
A full list of variables you can change can be found at [variables.tf](variables.tf).
|
||||
|
||||
All OpenStack resources will use the Terraform variable `cluster_name` (
|
||||
default `example`) in their name to make it easier to track. For example the
|
||||
first compute resource will be named `example-kubernetes-1`.
|
||||
|
||||
#### Terraform
|
||||
|
||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||
step is required by the terraform provisioner:
|
||||
|
||||
```
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
|
||||
Ensure that you have your Openstack credentials loaded into Terraform
|
||||
environment variables. Likely via a command similar to:
|
||||
|
||||
@@ -101,78 +73,60 @@ $ echo Setting up Terraform creds && \
|
||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||
```
|
||||
|
||||
### Terraform Variables
|
||||
The construction of the cluster is driven by values found in
|
||||
[variables.tf](variables.tf).
|
||||
|
||||
The best way to set these values is to create a file in the project's root
|
||||
directory called something like `my-terraform-vars.tfvars`. Many of the
|
||||
variables are obvious. Here is a summary of some of the more interesting
|
||||
ones:
|
||||
|
||||
|Variable | Description |
|
||||
|---------|-------------|
|
||||
|`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. |
|
||||
|`network_name` | The name to be given to the internal network that will be generated |
|
||||
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|
||||
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|
||||
|`external_net` | UUID of the external network that will be routed to |
|
||||
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through`nova flavor-list` |
|
||||
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|
||||
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|
||||
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|
||||
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
|
||||
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|
||||
|`number_of_etcd` | Number of pure etcd nodes |
|
||||
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. |
|
||||
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|
||||
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
|
||||
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
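Putting a few of these together, a minimal `my-terraform-vars.tfvars` might look like the sketch below (all values are placeholders; the flavor IDs, image name, ssh user and external network UUID must come from your own OpenStack installation):

```bash
$ cat > my-terraform-vars.tfvars <<'EOF'
cluster_name = "mycluster"
network_name = "internal"
floatingip_pool = "external"
external_net = "00000000-0000-0000-0000-000000000000"
public_key_path = "~/.ssh/id_rsa.pub"
image = "CoreOS-stable"
ssh_user = "core"
flavor_k8s_master = "3"
flavor_k8s_node = "3"
number_of_k8s_masters = "1"
number_of_k8s_nodes = "2"
EOF
```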
|
||||
|
||||
## Initializing Terraform
|
||||
Before Terraform can operate on your cluster you need to install required
|
||||
plugins. This is accomplished with the command
|
||||
|
||||
```bash
|
||||
$ terraform init contrib/terraform/openstack
|
||||
```
|
||||
|
||||
## Provisioning Cluster with Terraform
|
||||
You can apply the terraform config to your cluster with the following command
|
||||
issued from the project's root directory
|
||||
```bash
|
||||
$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If you chose to create a bastion host, this script will create
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to
be able to access your machines by tunneling through the bastion's IP address. If
you want to manually handle the ssh tunneling to these machines, please delete
or move that file. If you want to use this, just leave it there, as ansible will
pick it up automatically.
|
||||
|
||||
|
||||
## Destroying Cluster with Terraform
|
||||
You can destroy a config deployed to your cluster with the following command
|
||||
issued from the project's root directory
|
||||
```bash
|
||||
$ terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
## Debugging Cluster Provisioning
|
||||
You can enable debugging output from Terraform by setting
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the terraform command.
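For example (a sketch; the state and var-file paths are the same ones used elsewhere in this document):

```bash
$ export OS_DEBUG=1
$ export TF_LOG=DEBUG
$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
```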
|
||||
|
||||
|
||||
# Running the Ansible Script
Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

If you want to provision master or node VMs that don't use floating IPs, write a `my-terraform-vars.tfvars` file, for example:

```
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "0"
```
This will provision one VM as master using a floating IP, two additional masters using no floating IPs (these will only have private IPs inside your tenancy) and one VM as node, again without a floating IP.
|
||||
|
||||
Additionally, the terraform based installation now supports provisioning of a GlusterFS shared file system backed by a separate set of VMs, running either a Debian or RedHat based image. To enable this, you need to add the following variables to your `my-terraform-vars.tfvars`:
|
||||
|
||||
```
|
||||
# Flavor depends on your openstack installation, you can get available flavor IDs through `nova flavor-list`
|
||||
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
||||
# This is the name of an image already available in your openstack installation.
|
||||
image_gfs = "Ubuntu 15.10"
|
||||
number_of_gfs_nodes_no_floating_ip = "3"
|
||||
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
|
||||
gfs_volume_size_in_gb = "50"
|
||||
# The user needed for the image chosen for GlusterFS.
|
||||
ssh_user_gfs = "ubuntu"
|
||||
```
|
||||
|
||||
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs need to be either Debian or RedHat based VMs; Container Linux by CoreOS cannot serve GlusterFS, but it can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
|
||||
# Provision a Kubernetes Cluster on OpenStack
|
||||
|
||||
If not using a tfvars file for your setup, then execute:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
|
||||
openstack_compute_secgroup_v2.k8s_master: Creating...
|
||||
description: "" => "example - Kubernetes Master"
|
||||
name: "" => "example-k8s-master"
|
||||
rule.#: "" => "<computed>"
|
||||
...
|
||||
...
|
||||
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.
|
||||
|
||||
The state of your infrastructure has been saved to the path
|
||||
below. This state is required to modify and destroy your
|
||||
infrastructure, so keep it safe. To inspect the complete state
|
||||
use the `terraform show` command.
|
||||
|
||||
State path: contrib/terraform/openstack/terraform.tfstate
|
||||
```
|
||||
|
||||
Alternatively, if you wrote your terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
|
||||
```
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If you choose to add masters or nodes without floating IPs (only internal IPs on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to be able to access your machines by tunneling through the first floating IP used. If you want to manually handle the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as ansible will pick it up automatically.
|
||||
|
||||
Make sure you can connect to the hosts:
|
||||
|
||||
@@ -192,116 +146,26 @@ example-k8s-master-1 | SUCCESS => {
|
||||
}
|
||||
```
|
||||
|
||||
If you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
|
||||
|
||||
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
|
||||
|
||||
## Configure Cluster variables
|
||||
|
||||
Edit `inventory/group_vars/all.yml`:
|
||||
- Set variable **bootstrap_os** according to the selected image
|
||||
```
|
||||
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
||||
bootstrap_os: coreos
|
||||
```
|
||||
- **bin_dir**
|
||||
```
|
||||
# Directory where the binaries will be installed
|
||||
# Default:
|
||||
# bin_dir: /usr/local/bin
|
||||
# For Container Linux by CoreOS:
|
||||
bin_dir: /opt/bin
|
||||
```
|
||||
- and **cloud_provider**
|
||||
```
|
||||
cloud_provider: openstack
|
||||
```
|
||||
Edit `inventory/group_vars/k8s-cluster.yml`:
|
||||
- Set variable **kube_network_plugin** according to the selected networking
|
||||
```
|
||||
# Choose network plugin (calico, weave or flannel)
|
||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||
kube_network_plugin: flannel
|
||||
```
|
||||
> flannel works out-of-the-box
|
||||
|
||||
> calico requires allowing the service and pod subnets on the corresponding OpenStack Neutron ports
|
||||
- Set variable **resolvconf_mode**
|
||||
```
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
# Default:
|
||||
# resolvconf_mode: docker_dns
|
||||
# For Container Linux by CoreOS:
|
||||
resolvconf_mode: host_resolvconf
|
||||
```
|
||||
|
||||
For calico, configure the OpenStack Neutron ports as described in [OpenStack](/docs/openstack.md).
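As a rough illustration of what that involves, allowing the service and pod subnets on a node's Neutron port might look like the sketch below (the port ID is a placeholder, and the subnets are assumed to match `kube_service_addresses` and `kube_pods_subnet` in `k8s-cluster.yml`; the linked document is authoritative):

```bash
# Allow the Kubernetes service and pod CIDRs on the instance's Neutron port
$ neutron port-update <port-id> --allowed_address_pairs list=true type=dict \
    ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
```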
|
||||
|
||||
## Deploy kubernetes
|
||||
|
||||
```
|
||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
||||
```
|
||||
|
||||
## Set up local kubectl
|
||||
1. Install kubectl on your workstation:
|
||||
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||
2. Add route to internal IP of master node (if needed):
|
||||
```
|
||||
sudo route add [master-internal-ip] gw [router-ip]
|
||||
```
|
||||
or
|
||||
```
|
||||
sudo route add -net [internal-subnet]/24 gw [router-ip]
|
||||
```
|
||||
3. List Kubernetes certs&keys:
|
||||
```
|
||||
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
|
||||
```
|
||||
4. Get admin's certs&key:
|
||||
```
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||
```
|
||||
5. Configure kubectl:
|
||||
```
|
||||
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||
--certificate-authority=ca.pem
|
||||
# clean up:
|
||||
|
||||
kubectl config set-credentials default-admin \
|
||||
--certificate-authority=ca.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--client-certificate=admin.pem
|
||||
|
||||
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||
kubectl config use-context default-system
|
||||
```
|
||||
7. Check it:
```
kubectl version
```
|
||||
|
||||
If you are using floating ip addresses then you may get this error:
|
||||
```
|
||||
Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
|
||||
```
|
||||
|
||||
You can tell kubectl to ignore this condition by adding the
|
||||
`--insecure-skip-tls-verify` option.
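For example (a sketch):

```bash
$ kubectl --insecure-skip-tls-verify=true version
```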
|
||||
|
||||
## GlusterFS
|
||||
GlusterFS is not deployed by the standard `cluster.yml` playbook; see the
[glusterfs playbook documentation](../../network-storage/glusterfs/README.md)
for instructions.
|
||||
|
||||
Basically you will install gluster as
|
||||
```bash
|
||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
|
||||
# What's next
|
||||
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
../../../inventory/group_vars
|
||||
contrib/terraform/openstack/group_vars/all.yml (symbolic link)
@@ -0,0 +1 @@
|
||||
../../../../inventory/group_vars/all.yml
|
||||
@@ -1,55 +1,167 @@
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
}
|
||||
|
||||
module "network" {
|
||||
source = "modules/network"
|
||||
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
}
|
||||
|
||||
|
||||
module "ips" {
|
||||
source = "modules/ips"
|
||||
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
floatingip_pool = "${var.floatingip_pool}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
router_id = "${module.network.router_id}"
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${file(var.public_key_path)}"
|
||||
}
|
||||
|
||||
module "compute" {
|
||||
source = "modules/compute"
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_etcd = "${var.number_of_etcd}"
|
||||
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
|
||||
public_key_path = "${var.public_key_path}"
|
||||
image = "${var.image}"
|
||||
image_gfs = "${var.image_gfs}"
|
||||
ssh_user = "${var.ssh_user}"
|
||||
ssh_user_gfs = "${var.ssh_user_gfs}"
|
||||
flavor_k8s_master = "${var.flavor_k8s_master}"
|
||||
flavor_k8s_node = "${var.flavor_k8s_node}"
|
||||
flavor_etcd = "${var.flavor_etcd}"
|
||||
flavor_gfs_node = "${var.flavor_gfs_node}"
|
||||
network_name = "${var.network_name}"
|
||||
flavor_bastion = "${var.flavor_bastion}"
|
||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||
bastion_fips = "${module.ips.bastion_fips}"
|
||||
|
||||
network_id = "${module.network.router_id}"
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage"
|
||||
}
|
||||
volume {
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#output "msg" {
|
||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||
#}
|
||||
|
||||
@@ -1,280 +0,0 @@
|
||||
|
||||
|
||||
variable user_data {
|
||||
type = "string"
|
||||
default = <<EOF
|
||||
#cloud-config
|
||||
manage_etc_hosts: localhost
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
EOF
|
||||
}
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${chomp(file(var.public_key_path))}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "6443"
|
||||
to_port = "6443"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion"
|
||||
description = "${var.cluster_name} - Bastion Server"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||
count = "${var.number_of_bastions}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_bastion}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "${var.user_data}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
floating_ip = "${var.bastion_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
user_data = "#cloud-config\nmanage_etc_hosts: localhost\npackage_update: true\npackage_upgrade: true"
|
||||
}
|
||||
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
variable "cluster_name" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
}
|
||||
|
||||
variable "number_of_gfs_nodes_no_floating_ip" {
|
||||
}
|
||||
|
||||
variable "gfs_volume_size_in_gb" {
|
||||
}
|
||||
|
||||
variable "public_key_path" {
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
}
|
||||
|
||||
variable "image_gfs" {
|
||||
}
|
||||
|
||||
variable "ssh_user" {
|
||||
}
|
||||
|
||||
variable "ssh_user_gfs" {
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
}
|
||||
|
||||
variable "flavor_etcd" {
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "flavor_bastion" {
|
||||
}
|
||||
|
||||
|
||||
variable "network_id"{
|
||||
|
||||
}
|
||||
|
||||
|
||||
variable "k8s_master_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "k8s_node_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "bastion_fips" {
|
||||
type = "list"
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
|
||||
resource "null_resource" "dummy_dependency" {
|
||||
triggers {
|
||||
dependency_id = "${var.router_id}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
output "k8s_master_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
variable "number_of_k8s_masters" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
|
||||
}
|
||||
|
||||
variable "external_net" {
|
||||
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "router_id"{
|
||||
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
|
||||
resource "openstack_networking_router_v2" "k8s" {
|
||||
name = "${var.cluster_name}-router"
|
||||
admin_state_up = "true"
|
||||
external_gateway = "${var.external_net}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_network_v2" "k8s" {
|
||||
name = "${var.network_name}"
|
||||
admin_state_up = "true"
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "k8s" {
|
||||
name = "${var.cluster_name}-internal-network"
|
||||
network_id = "${openstack_networking_network_v2.k8s.id}"
|
||||
cidr = "10.0.0.0/24"
|
||||
ip_version = 4
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "k8s" {
|
||||
router_id = "${openstack_networking_router_v2.k8s.id}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
output "router_id" {
|
||||
value = "${openstack_networking_router_interface_v2.k8s.id}"
|
||||
}
|
||||
|
||||
output "network_id" {
|
||||
value = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
variable "external_net" {
|
||||
|
||||
}
|
||||
|
||||
variable "network_name" {
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
}
|
||||
|
||||
variable "dns_nameservers"{
|
||||
type = "list"
|
||||
}
|
||||
@@ -2,30 +2,14 @@ variable "cluster_name" {
|
||||
default = "example"
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_etcd" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
default = 1
|
||||
}
|
||||
@@ -67,28 +51,15 @@ variable "ssh_user_gfs" {
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "flavor_bastion" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_etcd" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
@@ -97,21 +68,11 @@ variable "network_name" {
|
||||
default = "internal"
|
||||
}
|
||||
|
||||
variable "dns_nameservers"{
|
||||
description = "An array of DNS name server names used by hosts in this subnet."
|
||||
type = "list"
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
description = "name of the floating ip pool to use"
|
||||
default = "external"
|
||||
}
|
||||
|
||||
variable "external_net" {
|
||||
description = "uuid of the external/public network"
|
||||
}
|
||||
|
||||
variable "username" {
|
||||
description = "Your openstack username"
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python2
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2015 Cisco Systems, Inc.
|
||||
#
|
||||
@@ -70,14 +70,6 @@ def iterhosts(resources):
|
||||
yield parser(resource, module_name)
|
||||
|
||||
|
||||
def iterips(resources):
|
||||
'''yield ip tuples of (instance_id, ip)'''
|
||||
for module_name, key, resource in resources:
|
||||
resource_type, name = key.split('.', 1)
|
||||
if resource_type == 'openstack_compute_floatingip_associate_v2':
|
||||
yield openstack_floating_ips(resource)
|
||||
|
||||
|
||||
def parses(prefix):
|
||||
def inner(func):
|
||||
PARSERS[prefix] = func
|
||||
@@ -306,17 +298,6 @@ def softlayer_host(resource, module_name):
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
attrs = {
|
||||
'ip': raw_attrs['floating_ip'],
|
||||
'instance_id': raw_attrs['instance_id'],
|
||||
}
|
||||
return attrs
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
return raw_attrs['instance_id'], raw_attrs['floating_ip']
|
||||
|
||||
@parses('openstack_compute_instance_v2')
|
||||
@calculate_mantl_vars
|
||||
@@ -362,8 +343,6 @@ def openstack_host(resource, module_name):
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||
|
||||
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'metadata.ssh_user' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||
@@ -677,19 +656,6 @@ def clc_server(resource, module_name):
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
def iter_host_ips(hosts, ips):
|
||||
'''Update hosts that have an entry in the floating IP list'''
|
||||
for host in hosts:
|
||||
host_id = host[1]['id']
|
||||
if host_id in ips:
|
||||
ip = ips[host_id]
|
||||
host[1].update({
|
||||
'access_ip_v4': ip,
|
||||
'public_ipv4': ip,
|
||||
'ansible_ssh_host': ip,
|
||||
})
|
||||
yield host
|
||||
|
||||
|
||||
## QUERY TYPES
|
||||
def query_host(hosts, target):
|
||||
@@ -761,13 +727,6 @@ def main():
|
||||
parser.exit()
|
||||
|
||||
hosts = iterhosts(iterresources(tfstates(args.root)))
|
||||
|
||||
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
|
||||
ips = dict(iterips(iterresources(tfstates(args.root))))
|
||||
|
||||
if ips:
|
||||
hosts = iter_host_ips(hosts, ips)
|
||||
|
||||
if args.list:
|
||||
output = query_list(hosts)
|
||||
if args.nometa:
|
||||
|
||||
@@ -8,7 +8,7 @@ The inventory is composed of 3 groups:
|
||||
|
||||
* **kube-node** : list of kubernetes nodes where the pods will run.
|
||||
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
|
||||
* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
|
||||
* **etcd**: list of server to compose the etcd server. you should have at least 3 servers for failover purposes.
|
||||
|
||||
Note: do not modify the children of _k8s-cluster_, like putting
|
||||
the _etcd_ group into the _k8s-cluster_, unless you are certain
|
||||
@@ -27,7 +27,7 @@ not _kube-node_.
|
||||
|
||||
There are also two special groups:
|
||||
|
||||
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
|
||||
* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
|
||||
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
||||
|
||||
Below is a complete inventory example:
|
||||
@@ -67,33 +67,31 @@ Group vars and overriding variables precedence
|
||||
----------------------------------------------
|
||||
|
||||
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
|
||||
Optional variables are located in the `inventory/group_vars/all.yml`.
|
||||
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||
`inventory/group_vars/k8s-cluster.yml`.
|
||||
|
||||
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
|
||||
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||
those cannot be overriden from the group vars. In order to override, one should use
|
||||
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
||||
|
||||
Kubespray uses only a few layers to override things (or expect them to
|
||||
Kargo uses only a few layers to override things (or expect them to
|
||||
be overriden for roles):
|
||||
|
||||
Layer | Comment
|
||||
------|--------
|
||||
**role defaults** | provides best UX to override things for Kubespray deployments
|
||||
**role defaults** | provides best UX to override things for Kargo deployments
|
||||
inventory vars | Unused
|
||||
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
||||
inventory host_vars | Unused
|
||||
playbook group_vars | Unused
|
||||
playbook group_vars | Unuses
|
||||
playbook host_vars | Unused
|
||||
**host facts** | Kubespray overrides for internal roles' logic, like state flags
|
||||
**host facts** | Kargo overrides for internal roles' logic, like state flags
|
||||
play vars | Unused
|
||||
play vars_prompt | Unused
|
||||
play vars_files | Unused
|
||||
registered vars | Unused
|
||||
set_facts | Kubespray overrides those, for some places
|
||||
set_facts | Kargo overrides those, for some places
|
||||
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
||||
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
|
||||
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
|
||||
task vars (only for the task) | Unused for roles, but only for helper scripts
|
||||
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
||||
|
||||
@@ -124,12 +122,12 @@ The following tags are defined in playbooks:
|
||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||
| k8s-secrets | Configuring K8s certs/keys
|
||||
| kpm | Installing K8s apps definitions with KPM
|
||||
| kube-apiserver | Configuring static pod kube-apiserver
|
||||
| kube-controller-manager | Configuring static pod kube-controller-manager
|
||||
| kube-apiserver | Configuring self-hosted kube-apiserver
|
||||
| kube-controller-manager | Configuring self-hosted kube-controller-manager
|
||||
| kubectl | Installing kubectl and bash completion
|
||||
| kubelet | Configuring kubelet service
|
||||
| kube-proxy | Configuring static pod kube-proxy
|
||||
| kube-scheduler | Configuring static pod kube-scheduler
|
||||
| kube-proxy | Configuring self-hosted kube-proxy
|
||||
| kube-scheduler | Configuring self-hosted kube-scheduler
|
||||
| localhost | Special steps for the localhost (ansible runner)
|
||||
| master | Configuring K8s master node role
|
||||
| netchecker | Installing netchecker K8s app
|
||||
@@ -157,12 +155,12 @@ ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsma
|
||||
```
|
||||
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
|
||||
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
|
||||
```
|
||||
And this prepares all container images localy (at the ansible runner node) without installing
|
||||
or upgrading related stuff or trying to upload container to K8s cluster nodes:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yml \
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yaml \
|
||||
-e download_run_once=true -e download_localhost=true \
|
||||
--tags download --skip-tags upload,upgrade
|
||||
```
|
||||
|
||||
@@ -1,22 +0,0 @@
Atomic host bootstrap
=====================

Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.

Note: Flannel is the only plugin that has currently been tested with atomic

### Vagrant

* For bootstrapping with Vagrant, use box centos/atomic-host
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
* Update `vm_memory = 2048` and `vm_cpus = 2`
* Networking on vagrant hosts has to be brought up manually once they are booted.

```
vagrant ssh
sudo /sbin/ifup enp0s8
```

* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/

Then you can proceed to [cluster deployment](#run-deployment)
52 docs/aws.md
@@ -3,58 +3,8 @@ AWS

To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.

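For reference, that step amounts to something like the following `group_vars/all.yml` excerpt (a minimal sketch, nothing else changed):

```
# group_vars/all.yml (excerpt)
cloud_provider: aws
```
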
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

You will also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.

Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.

You can now create your cluster!

### Dynamic Inventory ###
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.

This will produce an inventory that is passed into Ansible that looks like the following:
```
{
  "_meta": {
    "hostvars": {
      "ip-172-31-3-xxx.us-east-2.compute.internal": {
        "ansible_ssh_host": "172.31.3.xxx"
      },
      "ip-172-31-8-xxx.us-east-2.compute.internal": {
        "ansible_ssh_host": "172.31.8.xxx"
      }
    }
  },
  "etcd": [
    "ip-172-31-3-xxx.us-east-2.compute.internal"
  ],
  "k8s-cluster": {
    "children": [
      "kube-master",
      "kube-node"
    ]
  },
  "kube-master": [
    "ip-172-31-3-xxx.us-east-2.compute.internal"
  ],
  "kube-node": [
    "ip-172-31-8-xxx.us-east-2.compute.internal"
  ]
}
```

Guide:
- Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:
```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
```
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`

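Putting the pieces above together, a full run against the dynamic inventory might look like the sketch below; the `-u centos -b` connection options are assumptions that depend on your AMI and SSH user:

```
VPC_VISIBILITY="public" ansible-playbook -i inventory/kubespray-aws-inventory.py \
    -u centos -b cluster.yml
```
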
@@ -1,7 +1,7 @@
Azure
===============

To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.

All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.

@@ -49,8 +49,8 @@ This is the AppId from the last command
- Create the role assignment with:
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`

azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your choosen secret.

## Provisioning Azure with Resource Group Templates

You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
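As a sketch, the corresponding inventory variables (names taken from the text above, values are placeholders) might be set like:

```
cloud_provider: azure
azure_aad_client_id: "<AppId from the step above>"
azure_aad_client_secret: "<your chosen secret>"
```
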
@@ -96,7 +96,7 @@ You need to edit your inventory and add:
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))

Here's an example of Kubespray inventory with route reflectors:
Here's an example of Kargo inventory with route reflectors:

```
[all]
@@ -145,27 +145,9 @@ cluster_id="1.0.0.1"
The inventory above will deploy the following topology assuming that calico's
`global_as_num` is set to `65400`:



##### Optional : Define default endpoint to host action

By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in Kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to service endpoints (with hostNetwork=True) within the same node are dropped.

To re-define default action please set the following variable in your inventory:
```
calico_endpoint_to_host_action: "ACCEPT"
```


Cloud providers configuration
=============================

Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.

##### Optional : Ignore kernel's RPF check setting

By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:

```
calico_node_ignorelooserpf: true
```

@@ -3,17 +3,17 @@ Cloud providers

#### Provisioning

You can use kubespray-cli to start new instances on cloud providers
You can use kargo-cli to start new instances on cloud providers
here's an example
```
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```

#### Deploy kubernetes

With kubespray-cli
With kargo-cli
```
kubespray deploy [--aws|--gce] -u admin
kargo deploy [--aws|--gce] -u admin
```

Or ansible-playbook command

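The plain ansible-playbook invocation is illustrated below (a sketch only; the inventory path and private key are placeholders):

```
ansible-playbook -i inventory/inventory.cfg -b -v \
  --private-key=~/.ssh/private_key cluster.yml
```
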
@@ -1,25 +1,25 @@
|
||||
Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||
Kargo vs [Kops](https://github.com/kubernetes/kops)
|
||||
---------------
|
||||
|
||||
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
|
||||
Kargo runs on bare metal and most clouds, using Ansible as its substrate for
|
||||
provisioning and orchestration. Kops performs the provisioning and orchestration
|
||||
itself, and as such is less flexible in deployment platforms. For people with
|
||||
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
||||
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
|
||||
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
|
||||
however, is more tightly integrated with the unique features of the clouds it
|
||||
supports so it could be a better choice if you know that you will only be using
|
||||
one platform for the foreseeable future.
|
||||
|
||||
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||
------------------
|
||||
|
||||
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
||||
management, including self-hosted layouts, dynamic discovery services and so
|
||||
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||
it may have been named a "Kubernetes cluster operator". Kubespray however,
|
||||
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||
it would've likely been named a "Kubernetes cluster operator". Kargo however,
|
||||
does generic configuration management tasks from the "OS operators" ansible
|
||||
world, plus some initial K8s clustering (with networking plugins included) and
|
||||
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
|
||||
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
|
||||
to adopt kubeadm as a tool in order to consume life cycle management domain
|
||||
knowledge from it and offload generic OS configuration things from it, which
|
||||
hopefully benefits both sides.
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
Contiv
|
||||
======
|
||||
|
||||
Here is the [Contiv documentation](http://contiv.github.io/documents/).
|
||||
|
||||
## Administrate Contiv
|
||||
|
||||
There are two ways to manage Contiv:
|
||||
|
||||
* a web UI managed by the api proxy service
|
||||
* a CLI named `netctl`
|
||||
|
||||
|
||||
### Interfaces
|
||||
|
||||
#### The Web Interface
|
||||
|
||||
This UI is hosted on all kubernetes master nodes. The service is available at `https://<one of your master node>:10000`.
|
||||
|
||||
You can configure the api proxy by overriding the following variables:
|
||||
|
||||
```yaml
|
||||
contiv_enable_api_proxy: true
|
||||
contiv_api_proxy_port: 10000
|
||||
contiv_generate_certificate: true
|
||||
```
|
||||
|
||||
The default credentials to log in are: admin/admin.
|
||||
|
||||
|
||||
#### The Command Line Interface
|
||||
|
||||
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
|
||||
|
||||
```bash
|
||||
export NETMASTER=http://127.0.0.1:9999
|
||||
```
|
||||
|
||||
The port can be changed by overriding the following variable:
|
||||
|
||||
```yaml
|
||||
contiv_netmaster_port: 9999
|
||||
```
|
||||
|
||||
The CLI doesn't use the authentication process needed by the web interface.
|
||||
|
||||
|
||||
### Network configuration
|
||||
|
||||
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
|
||||
|
||||
* `contivh1`: an infrastructure network. It allows nodes to access the pods IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
|
||||
* `default-net` : the default network that hosts pods.
|
||||
|
||||
You can change the default network configuration by overriding the `contiv_networks` variable.
|
||||
|
||||
The default forward mode is set to routing:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: routing
|
||||
```
|
||||
|
||||
The following is an example of how you can use VLAN instead of VXLAN:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: bridge
|
||||
contiv_vlan_interface: eth0
|
||||
contiv_networks:
|
||||
- name: default-net
|
||||
subnet: "{{ kube_pods_subnet }}"
|
||||
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
encap: vlan
|
||||
pkt_tag: 10
|
||||
```
|
||||
@@ -1,20 +1,24 @@
|
||||
CoreOS bootstrap
|
||||
===============
|
||||
|
||||
Example with **kubespray-cli**:
|
||||
Example with **kargo-cli**:
|
||||
|
||||
```
|
||||
kubespray deploy --gce --coreos
|
||||
kargo deploy --gce --coreos
|
||||
```
|
||||
|
||||
Or with Ansible:
|
||||
|
||||
Before running the cluster playbook you must satisfy the following requirements:
|
||||
|
||||
General CoreOS Pre-Installation Notes:
|
||||
- You should set the bootstrap_os variable to `coreos`
|
||||
- Ensure that the bin_dir is set to `/opt/bin`
|
||||
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
|
||||
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
|
||||
* On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space)
|
||||
|
||||
* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`
|
||||
|
||||
* run the Python bootstrap playbook
|
||||
|
||||
```
|
||||
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
|
||||
```
|
||||
|
||||
Then you can proceed to [cluster deployment](#run-deployment)
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
Debian Jessie
|
||||
===============
|
||||
|
||||
Debian Jessie installation Notes:
|
||||
|
||||
- Add
|
||||
|
||||
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
||||
|
||||
to /etc/default/grub. Then update with
|
||||
|
||||
```
|
||||
sudo update-grub
|
||||
sudo update-grub2
|
||||
sudo reboot
|
||||
```
|
||||
|
||||
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||
|
||||
```apt-get -t jessie-backports install systemd```
|
||||
|
||||
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||
|
||||
- Add the Ansible repository and install Ansible to get a proper version
|
||||
|
||||
```
|
||||
sudo add-apt-repository ppa:ansible/ansible
|
||||
sudo apt-get update
|
||||
sudo apt-get install ansible
|
||||
|
||||
```
|
||||
|
||||
- Install Jinja2 and Python-Netaddr
|
||||
|
||||
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||
|
||||
|
||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||
@@ -1,7 +1,7 @@
K8s DNS stack by Kubespray
K8s DNS stack by Kargo
======================

For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).

DNS modes supported by Kubespray
DNS modes supported by kargo
============================

You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.

## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available:

#### dnsmasq_kubedns (default)
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
@@ -62,18 +62,12 @@ other queries are forwardet to the nameservers found in ``upstream_dns_servers``
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.

#### manual
This does not install dnsmasq or kubedns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
Use this method if you plan to install your own DNS server in the cluster after
initial deployment.

#### none
This does not install any of dnsmasq and kubedns/skydns. This basically disables cluster DNS completely and
leaves you with a non functional cluster.

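For illustration, overriding the modes described above from your inventory group vars might look like the sketch below; the file path and the addresses are assumptions, not project defaults:

```
# e.g. inventory/group_vars/k8s-cluster.yml
dns_mode: manual
manual_dns_server: 10.233.0.3    # hypothetical address of the DNS server you plan to run
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
```
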
## resolvconf_mode
``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:

#### docker_dns (default)
@@ -106,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
servers, which in turn will forward queries to the system nameserver if required.

#### host_resolvconf
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).

As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
@@ -126,7 +120,7 @@ cluster service names.
Limitations
-----------

* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can
* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
not answer with authority to arbitrary recursive resolvers. This task is left
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
for details.

@@ -1,7 +1,7 @@
Downloading binaries and containers
===================================

Kubespray supports several download/upload modes. The default is:
Kargo supports several download/upload modes. The default is:

* Each node downloads binaries and container images on its own, which is
``download_run_once: False``.

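For example, to have the Ansible runner download everything once and push it to the nodes, the related variables (used elsewhere in this document) can be overridden like this sketch:

```
download_run_once: true
download_localhost: true
```
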
@@ -23,6 +23,13 @@ ip a show dev flannel.1
valid_lft forever preferred_lft forever
```

* Docker must be configured with a bridge ip in the flannel subnet.

```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```

* Try to run a container and check its ip address

```

@@ -1,44 +1,44 @@
|
||||
Getting started
|
||||
===============
|
||||
|
||||
The easiest way to run the deployement is to use the **kubespray-cli** tool.
|
||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
|
||||
The easiest way to run the deployement is to use the **kargo-cli** tool.
|
||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
|
||||
|
||||
Here is a simple example on AWS:
|
||||
|
||||
* Create instances and generate the inventory
|
||||
|
||||
```
|
||||
kubespray aws --instances 3
|
||||
kargo aws --instances 3
|
||||
```
|
||||
|
||||
* Run the deployment
|
||||
|
||||
```
|
||||
kubespray deploy --aws -u centos -n calico
|
||||
kargo deploy --aws -u centos -n calico
|
||||
```
|
||||
|
||||
Building your own inventory
|
||||
---------------------------
|
||||
|
||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or inifile. There is
|
||||
an example inventory located
|
||||
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
|
||||
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
|
||||
|
||||
You can use an
|
||||
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
|
||||
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
|
||||
to create or modify an Ansible inventory. Currently, it is limited in
|
||||
functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
|
||||
support creating inventory file for large clusters as well. It now supports
|
||||
functionality and is only use for making a basic Kargo cluster, but it does
|
||||
support creating large clusters. It now supports
|
||||
separated ETCD and Kubernetes master roles from node role if the size exceeds a
|
||||
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` help for more information.
|
||||
certain threshold. Run inventory.py help for more information.
|
||||
|
||||
Example inventory generator usage:
|
||||
|
||||
```
|
||||
cp -r inventory my_inventory
|
||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
|
||||
```
|
||||
|
||||
Starting custom deployment
|
||||
@@ -47,76 +47,10 @@ Starting custom deployment
Once you have an inventory, you may want to customize deployment data vars
and start the deployment:

**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
# Edit my_inventory/groups_vars/*.yaml to override data vars
ansible-playbook -i my_inventory/inventory.cfg cluster.yaml -b -v \
--private-key=~/.ssh/private_key
```

See more details in the [ansible guide](ansible.md).

Adding nodes
------------

You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.

- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
```
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
--private-key=~/.ssh/private_key
```

Connecting to Kubernetes
------------------------
By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](ha-mode.md).

Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One could generate a kubeconfig based on one installed
kube-master hosts (needs improvement) or connect with a username and password.
By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself.

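One illustrative way to verify password access from the outside, assuming password authentication is enabled as described above (the host name is a placeholder):

```
curl -k -u kube:$(cat credentials/kube_user) https://first_master:6443/api/v1/nodes
```
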
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).

Accessing Kubernetes Dashboard
------------------------------

As of kubernetes-dashboard v1.7.x:
* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
* Requires RBAC in authorization_modes
* Only serves over https
* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL

If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials:
https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login

Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login

It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above

Accessing Kubernetes API
------------------------

The main client of Kubernetes is `kubectl`. It is installed on each kube-master
host and can optionally be configured on your ansible host by setting
`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
admin.conf will appear in the artifacts/ directory after deployment. You can
see a list of nodes by running the following commands:

cd artifacts/
./kubectl --kubeconfig admin.conf get nodes

If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.

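For example (paths are illustrative, adjust to your system):

```
sudo cp artifacts/kubectl /usr/local/bin/kubectl
mkdir -p ~/.kube && cp artifacts/admin.conf ~/.kube/config
kubectl get nodes
```
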
109 docs/ha-mode.md
@@ -11,37 +11,42 @@ achieve the same goal.
|
||||
Etcd
|
||||
----
|
||||
|
||||
Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
|
||||
a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
|
||||
cluster members as backends. Note that the `access_ip` is used as the backend
|
||||
IP, if specified. Frontend endpoints cannot be accessed externally as they are
|
||||
bound to a localhost only.
|
||||
|
||||
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
||||
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
|
||||
It makes deployed components to access the etcd cluster members
|
||||
`etcd_multiaccess` (defaults to `false`) group var controlls that behavior.
|
||||
When enabled, it makes deployed components to access the etcd cluster members
|
||||
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
||||
do a loadbalancing and handle HA for connections.
|
||||
do a loadbalancing and handle HA for connections. Note, a pod definition of a
|
||||
flannel networking plugin always uses a single `--etcd-server` endpoint!
|
||||
|
||||
|
||||
Kube-apiserver
|
||||
--------------
|
||||
|
||||
K8s components require a loadbalancer to access the apiservers via a reverse
|
||||
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
||||
proxy. Kargo includes support for an nginx-based proxy that resides on each
|
||||
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||
is less efficient than a dedicated load balancer because it creates extra
|
||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||
where an external LB or virtual IP management is inconvenient. This option is
|
||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to
|
||||
`True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
|
||||
You may also define the port the local internal loadbalancer uses by changing,
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of
|
||||
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||
configure kubelet and kube-proxy on non-master nodes to use the local internal
|
||||
loadbalancer.
|
||||
configured by the variable `loadbalancer_apiserver_localhost`. You may also
|
||||
define the port the local internal loadbalancer users by changing,
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
||||
It is also import to note that Kargo will only configure kubelet and kube-proxy
|
||||
on non-master nodes to use the local internal loadbalancer.
|
||||
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to
|
||||
configure your own loadbalancer to achieve HA. Note that deploying a
|
||||
loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
|
||||
By default, it only configures a non-HA endpoint, which points to the
|
||||
`access_ip` or IP address of the first server node in the `kube-master` group.
|
||||
It can also configure clients to use endpoints for a given loadbalancer type.
|
||||
The following diagram shows how traffic to the apiserver is directed.
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
||||
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
||||
a user and is not covered by ansible roles in Kargo. By default, it only configures
|
||||
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
||||
node in the `kube-master` group. It can also configure clients to use endpoints
|
||||
for a given loadbalancer type. The following diagram shows how traffic to the
|
||||
apiserver is directed.
|
||||
|
||||

|
||||
|
||||
@@ -63,77 +68,45 @@ listen kubernetes-apiserver-https
|
||||
mode tcp
|
||||
timeout client 3h
|
||||
timeout server 3h
|
||||
server master1 <IP1>:6443
|
||||
server master2 <IP2>:6443
|
||||
server master1 <IP1>:443
|
||||
server master2 <IP2>:443
|
||||
balance roundrobin
|
||||
```
|
||||
|
||||
Note: That's an example config managed elsewhere outside of Kubespray.
|
||||
|
||||
And the corresponding example global vars for such a "cluster-aware"
|
||||
external LB with the cluster API access modes configured in Kubespray:
|
||||
And the corresponding example global vars config:
|
||||
```
|
||||
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
||||
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
||||
loadbalancer_apiserver:
|
||||
address: <VIP>
|
||||
port: 8383
|
||||
```
|
||||
|
||||
Note: The default kubernetes apiserver configuration binds to all interfaces,
|
||||
so you will need to use a different port for the vip from that the API is
|
||||
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
||||
listens on a specific interface (to avoid conflict with haproxy binding the
|
||||
port on the VIP adddress)
|
||||
|
||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
||||
into the generated self-signed TLS/SSL certificates as well. Note that
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
|
||||
the HAProxy service should as well be HA and requires a VIP management, which
|
||||
is out of scope of this doc.
|
||||
is out of scope of this doc. Specifying an external LB overrides any internal
|
||||
localhost LB configuration.
|
||||
|
||||
There is a special case for an internal and an externally configured (not with
|
||||
Kubespray) LB used simultaneously. Keep in mind that the cluster is not aware
|
||||
of such an external LB and you need no to specify any configuration variables
|
||||
for it.
|
||||
Note: In order to achieve HA for HAProxy instances, those must be running on
|
||||
the each node in the `k8s-cluster` group as well, but require no VIP, thus
|
||||
no VIP management.
|
||||
|
||||
Note: TLS/SSL termination for externally accessed API endpoints' will **not**
|
||||
be covered by Kubespray for that case. Make sure your external LB provides it.
|
||||
Alternatively you may specify an externally load balanced VIPs in the
|
||||
`supplementary_addresses_in_ssl_keys` list. Then, kubespray will add them into
|
||||
the generated cluster certifactes as well.
|
||||
Access endpoints are evaluated automagically, as the following:
|
||||
|
||||
Aside of that specific case, the `loadbalancer_apiserver` considered mutually
|
||||
exclusive to `loadbalancer_apiserver_localhost`.
|
||||
|
||||
Access API endpoints are evaluated automagically, as the following:
|
||||
|
||||
| Endpoint type | kube-master | non-master | external |
|
||||
|------------------------------|----------------|---------------------|---------------------|
|
||||
| Local LB (default) | https://bip:sp | https://lc:nsp | https://m[0].aip:sp |
|
||||
| Local LB + Unmanaged here LB | https://bip:sp | https://lc:nsp | https://ext |
|
||||
| External LB, no internal | https://bip:sp | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB | https://bip:sp | https://m[0].aip:sp | https://m[0].aip:sp |
|
||||
| Endpoint type | kube-master | non-master |
|
||||
|------------------------------|---------------|---------------------|
|
||||
| Local LB | http://lc:p | https://lc:nsp |
|
||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB (default) | http://lc:p | https://m[0].aip:sp |
|
||||
|
||||
Where:
|
||||
* `m[0]` - the first node in the `kube-master` group;
|
||||
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
||||
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
|
||||
* `lc` - localhost;
|
||||
* `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
|
||||
* `p` - insecure port, `kube_apiserver_insecure_port`
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
|
||||
* `sp` - secure port, `kube_apiserver_port`;
|
||||
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
||||
* `ip` - the node IP, defers to the ansible IP;
|
||||
* `aip` - `access_ip`, defers to the ip.
|
||||
|
||||
A second and a third column represent internal cluster access modes. The last
|
||||
column illustrates an example URI to access the cluster APIs externally.
|
||||
Kubespray has nothing to do with it, this is informational only.
|
||||
|
||||
As you can see, the masters' internal API endpoints are always
|
||||
contacted via the local bind IP, which is `https://bip:sp`.
|
||||
|
||||
**Note** that for some cases, like healthchecks of applications deployed by
|
||||
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
|
||||
consists of the local `kube_apiserver_insecure_bind_address` and
|
||||
`kube_apiserver_insecure_port`.
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
# Kubespray (kargo) in own ansible playbooks repo
|
||||
|
||||
1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
|
||||
Note:
|
||||
* All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
|
||||
* List of all forked repos could be retrieved from github page of original project.
|
||||
|
||||
2. Add **forked repo** as submodule to desired folder in your existent ansible repo(for example 3d/kubespray):
|
||||
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
||||
Git will create _.gitmodules_ file in your existent ansible repo:
|
||||
```
|
||||
[submodule "3d/kubespray"]
|
||||
path = 3d/kubespray
|
||||
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||
```
|
||||
|
||||
3. Configure git to show submodule status:
|
||||
```git config --global status.submoduleSummary true```
|
||||
|
||||
4. Add *original* kubespray repo as upstream:
|
||||
```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
|
||||
|
||||
5. Sync your master branch with upstream:
|
||||
```
|
||||
git checkout master
|
||||
git fetch upstream
|
||||
git merge upstream/master
|
||||
git push origin master
|
||||
```
|
||||
|
||||
6. Create a new branch which you will use in your working environment:
|
||||
```git checkout -b work```
|
||||
***Never*** use master branch of your repository for your commits.
|
||||
|
||||
7. Modify path to library and roles in your ansible.cfg file (role naming should be uniq, you may have to rename your existent roles if they have same names as kubespray project):
|
||||
```
|
||||
...
|
||||
library = 3d/kubespray/library/
|
||||
roles_path = 3d/kubespray/roles/
|
||||
...
|
||||
```
|
||||
|
||||
8. Copy and modify configs from kubespray `group_vars` folder to corresponging `group_vars` folder in your existent project.
|
||||
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
||||
|
||||
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
||||
For example:
|
||||
```
|
||||
...
|
||||
#Kargo groups:
|
||||
[kube-node:children]
|
||||
kubenode
|
||||
|
||||
[k8s-cluster:children]
|
||||
kubernetes
|
||||
|
||||
[etcd:children]
|
||||
kubemaster
|
||||
kubemaster-ha
|
||||
|
||||
[kube-master:children]
|
||||
kubemaster
|
||||
kubemaster-ha
|
||||
|
||||
[vault:children]
|
||||
kube-master
|
||||
|
||||
[kubespray:children]
|
||||
kubernetes
|
||||
```
|
||||
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
||||
|
||||
10. Now you can include kargo tasks in you existent playbooks by including cluster.yml file:
|
||||
```
|
||||
- name: Include kargo tasks
|
||||
include: 3d/kubespray/cluster.yml
|
||||
```
|
||||
Or your could copy separate tasks from cluster.yml into your ansible repository.
|
||||
|
||||
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
||||
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
||||
|
||||
# Contributing
|
||||
If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo.
|
||||
|
||||
0. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).
|
||||
|
||||
1. Change working directory to git submodule directory (3d/kubespray).
|
||||
|
||||
2. Setup desired user.name and user.email for submodule.
|
||||
If kubespray is only one submodule in your repo you could use something like:
|
||||
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-addres@used.for.cncf"'```
|
||||
|
||||
3. Sync with upstream master:
|
||||
```
|
||||
git fetch upstream
|
||||
git merge upstream/master
|
||||
git push origin master
|
||||
```
|
||||
4. Create new branch for the specific fixes that you want to contribute:
|
||||
```git checkout -b fixes-name-date-index```
|
||||
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
||||
|
||||
5. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
||||
```
|
||||
git cherry-pick <COMMIT_HASH>
|
||||
```
|
||||
6. If your have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
|
||||
|
||||
7. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work.
|
||||
Check that you're on correct branch:
|
||||
```git status```
|
||||
And pull changes from upstream (if any):
|
||||
```git pull --rebase upstream master```
|
||||
|
||||
8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
|
||||
|
||||
9. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning.
|
||||
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
||||
@@ -1,103 +0,0 @@
# Overview

Distributed systems such as Kubernetes are designed to be resilient to
failures. More details about Kubernetes High-Availability (HA) may be found at
[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/).

To keep the view simple, most parts of HA are skipped here in order to describe
Kubelet <-> Controller Manager communication only.

By default, the normal behavior looks like this:

1. Kubelet updates its status to the apiserver periodically, as specified by
   `--node-status-update-frequency`. The default value is **10s**.

2. Kubernetes controller manager checks the statuses of Kubelets every
   `--node-monitor-period`. The default value is **5s**.

3. In case the status is updated within `--node-monitor-grace-period` of time,
   Kubernetes controller manager considers the Kubelet healthy. The
   default value is **40s**.

> Kubernetes controller manager and Kubelets work asynchronously. It means that
> the delay may include any network latency, API Server latency, etcd latency,
> latency caused by load on the master nodes and so on. So if
> `--node-status-update-frequency` is set to 5s, in reality it may appear in
> etcd in 6-7 seconds or even longer when etcd cannot commit data to quorum
> nodes.

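For reference, these knobs are ordinary Kubelet and Kubernetes controller manager command-line flags. A minimal sketch of where they are passed, using the default values quoted above (all other required flags omitted):

```
# Kubelet side: how often the node posts its status (other flags omitted).
kubelet --node-status-update-frequency=10s

# Controller manager side: check period, unhealthy threshold, eviction delay.
kube-controller-manager \
  --node-monitor-period=5s \
  --node-monitor-grace-period=40s \
  --pod-eviction-timeout=5m
```
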
# Failure

Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
`nodeStatusUpdateRetry` is a constant set to 5 in
[kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).

Kubelet will try to update the status in the
[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L345)
function. Kubelet uses Golang's `http.Client()`, but has no timeout
specified. Thus there may be some glitches when the API Server is overloaded
while the TCP connection is being established.

So, there will be `nodeStatusUpdateRetry` * `--node-status-update-frequency`
attempts to set the status of a node.

At the same time, Kubernetes controller manager will try to check
`nodeStatusUpdateRetry` times every `--node-monitor-period`. After
`--node-monitor-grace-period` it will consider the node unhealthy and will
remove its pods based on `--pod-eviction-timeout`.

Kube proxy has a watcher over the API. Once pods are evicted, Kube proxy will
notice and will update the iptables of the node. It will remove endpoints from
services so pods from the failed node won't be accessible anymore.

# Recommendations for different cases

## Fast Update and Fast Reaction

If `--node-status-update-frequency` is set to **4s** (10s is default),
`--node-monitor-period` to **2s** (5s is default),
`--node-monitor-grace-period` to **20s** (40s is default), and
`--pod-eviction-timeout` to **30s** (5m is default):

In such a scenario, pods will be evicted in **50s** because the node will be
considered down after **20s**, and `--pod-eviction-timeout` occurs after
**30s** more. However, this scenario creates overhead on etcd, as every node
will try to update its status every 4 seconds.

If the environment has 1000 nodes, there will be 15000 node updates per
minute, which may require large etcd containers or even dedicated nodes for etcd.

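To make the arithmetic explicit, here is a back-of-the-envelope shell sketch using the values of this profile (not part of any playbook):

```
# Fast Update and Fast Reaction profile.
freq=4; grace=20; evict=30; nodes=1000
echo "pods evicted after ~$(( grace + evict ))s"                 # 50s
echo "etcd status updates: $(( nodes * 60 / freq )) per minute"  # 15000
```
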
> If we calculate the number of tries, the division will give 5, but in reality
> it will be from 3 to 5 with `nodeStatusUpdateRetry` attempts of each try. The
> total number of attempts will vary from 15 to 25 due to latency of all
> components.

## Medium Update and Average Reaction

Let's set `--node-status-update-frequency` to **20s**,
`--node-monitor-grace-period` to **2m** and `--pod-eviction-timeout` to **1m**.
In that case, Kubelet will try to update the status every 20s. So, there will be 6 * 5
= 30 attempts before the Kubernetes controller manager considers the node
unhealthy. After 1m it will evict all pods. The total time will be 3m
before the eviction process starts.

Such a scenario is good for medium environments, as 1000 nodes will require 3000
etcd updates per minute.

> In reality, there will be from 4 to 6 node update tries. The total number
> of attempts will vary from 20 to 30.

## Low Update and Slow Reaction

Let's set `--node-status-update-frequency` to **1m**,
`--node-monitor-grace-period` to **5m** and `--pod-eviction-timeout`
to **1m**. In this scenario, every kubelet will try to update the status every
minute. There will be 5 * 5 = 25 attempts before the status is considered
unhealthy. After 5m, the Kubernetes controller manager will set the unhealthy
status. This means that pods will be evicted 1m after being marked unhealthy
(6m in total).

> In reality, there will be from 3 to 5 tries. The total number of attempts will
> vary from 15 to 25.

There can be different combinations, such as Fast Update with Slow Reaction, to
satisfy specific cases.