Compare commits


3 Commits

Author           SHA1        Message        Date
Antoine Legrand  a222be7fae  fix tag push   2017-02-07 00:29:45 +01:00
Antoine Legrand  9d43cd86be  WIP            2017-02-06 22:02:30 +01:00
Antoine Legrand  6ed99f1f44  WIP            2017-02-06 19:59:11 +01:00
428 changed files with 3070 additions and 11512 deletions


@@ -24,7 +24,7 @@ explain why.
- **Version of Ansible** (`ansible --version`):
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
**Kargo version (commit) (`git rev-parse --short HEAD`):**
**Network plugin used**:

.gitignore vendored

@@ -1,94 +1,14 @@
.vagrant
*.retry
inventory/vagrant_ansible_inventory
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
temp
.idea
.tox
.cache
*.bak
*.egg-info
*.pyc
*.pyo
*.tfstate
*.tfstate.backup
**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
vagrant/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
artifacts/
env/
build/
credentials/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# dotenv
.env
# virtualenv
venv/
ENV/


@@ -18,7 +18,10 @@ variables:
# us-west1-a
before_script:
- pip install -r tests/requirements.txt
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- mkdir -p /.ssh
- cp tests/ansible.cfg .
@@ -44,23 +47,14 @@ before_script:
PRIVATE_KEY: $GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CLOUD_MACHINE_TYPE: "g1-small"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
BOOTSTRAP_OS: none
DOWNLOAD_LOCALHOST: "false"
DOWNLOAD_RUN_ONCE: "false"
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
KUBEADM_ENABLED: "false"
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "host"
VAULT_DEPLOYMENT: "docker"
KUBELET_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
MAGIC: "ci check this"
.gce: &gce
@@ -73,40 +67,36 @@ before_script:
- $HOME/.cache
before_script:
- docker info
- pip install -r tests/requirements.txt
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- mkdir -p /.ssh
- cp tests/ansible.cfg .
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
- chmod 400 $HOME/.ssh/id_rsa
- ansible-playbook --version
- cp tests/ansible.cfg .
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
script:
- pwd
- ls
- echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e cloud_machine_type=${CLOUD_MACHINE_TYPE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e startup_script="'${STARTUP_SCRIPT}'"
# Check out latest tag if testing upgrade
# Uncomment when gitlab kargo repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- test "${UPGRADE_TEST}" != "false" && git checkout 72ae7638bcc94c66afa8620dfa4ad9a9249327ea
# Create cluster
- >
@@ -114,151 +104,92 @@ before_script:
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e cert_management=${CERT_MGMT:-script}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e deploy_netchecker=true
-e download_localhost=true
-e download_run_once=true
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml
# Repeat deployment if testing upgrade
- >
if [ "${UPGRADE_TEST}" != "false" ]; then
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
# Tests Cases
## Test Master API
- >
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between 2 pods
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e ansible_python_interpreter=${PYPATH}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
reset.yml;
fi
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e ansible_python_interpreter=${PYPATH}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
@@ -276,51 +207,21 @@ before_script:
-e cloud_region=${CLOUD_REGION}
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-west1-b
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: aio
CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
##User-data to simply turn off coreos upgrades
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables
.debian8_canal_ha_variables: &debian8_canal_ha_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-east1-b
CLUSTER_MODE: ha
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: weave
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_IMAGE: centos-7
CLOUD_MACHINE_TYPE: "n1-standard-1"
CLOUD_REGION: us-central1-b
CLUSTER_MODE: ha
KUBEADM_ENABLED: "true"
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_MACHINE_TYPE: "n1-standard-1"
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
KUBEADM_ENABLED: "true"
STARTUP_SCRIPT: ""
.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1
@@ -328,35 +229,30 @@ before_script:
CLOUD_IMAGE: rhel-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.centos7_flannel_variables: &centos7_flannel_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: centos-7
CLOUD_REGION: us-west1-a
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.debian8_calico_variables: &debian8_calico_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: debian-8-kubespray
CLOUD_REGION: us-central1-b
CLUSTER_MODE: default
STARTUP_SCRIPT: ""
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos
IDEMPOT_CHECK: "true"
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
IDEMPOT_CHECK: "true"
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-gce-special
@@ -364,7 +260,6 @@ before_script:
CLOUD_IMAGE: rhel-7
CLOUD_REGION: us-east1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-gce-special
@@ -373,28 +268,22 @@ before_script:
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
IDEMPOT_CHECK: "false"
STARTUP_SCRIPT: ""
.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: calico
DOWNLOAD_LOCALHOST: "true"
DOWNLOAD_RUN_ONCE: "true"
CLOUD_IMAGE: centos-7
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha-scale
CLUSTER_MODE: ha
IDEMPOT_CHECK: "true"
STARTUP_SCRIPT: ""
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special
KUBE_NETWORK_PLUGIN: weave
CLOUD_IMAGE: coreos-alpha-1506-0-0-v20170817
CLOUD_IMAGE: coreos-alpha
CLOUD_REGION: us-west1-a
CLUSTER_MODE: ha-scale
CLUSTER_MODE: ha
BOOTSTRAP_OS: coreos
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-gce-part1
@@ -404,36 +293,15 @@ before_script:
CLUSTER_MODE: separate
ETCD_DEPLOYMENT: rkt
KUBELET_DEPLOYMENT: rkt
STARTUP_SCRIPT: ""
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_MACHINE_TYPE: "n1-standard-2"
KUBE_NETWORK_PLUGIN: canal
CERT_MGMT: vault
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: us-central1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
# stage: deploy-gce-special
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-aio:
coreos-calico-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_calico_aio_variables
<<: *coreos_calico_sep_variables
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
@@ -444,7 +312,7 @@ coreos-calico-sep-triggers:
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_calico_aio_variables
<<: *coreos_calico_sep_variables
when: on_success
only: ['triggers']
@@ -491,66 +359,24 @@ ubuntu-weave-sep-triggers:
only: ['triggers']
# More builds for PRs/merges (manual) and triggers (auto)
ubuntu-canal-ha-rbac:
debian8-canal-ha:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_rbac_variables
<<: *debian8_canal_ha_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-canal-ha-rbac-triggers:
debian8-canal-ha-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_rbac_variables
when: on_success
only: ['triggers']
ubuntu-canal-kubeadm-rbac:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-canal-kubeadm-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: on_success
only: ['triggers']
centos-weave-kubeadm-rbac:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
centos-weave-kubeadm-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *centos_weave_kubeadm_variables
<<: *debian8_canal_ha_variables
when: on_success
only: ['triggers']
@@ -575,7 +401,7 @@ rhel7-weave-triggers:
when: on_success
only: ['triggers']
debian8-calico-upgrade:
debian8-calico:
stage: deploy-gce-part2
<<: *job
<<: *gce
@@ -682,28 +508,6 @@ ubuntu-rkt-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-vault-sep:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_vault_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-flannel-rbac-sep:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_flannel_rbac_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# Premoderated with manual actions
ci-authorized:
<<: *job
@@ -713,22 +517,12 @@ ci-authorized:
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
syntax-check:
<<: *job
stage: unit-tests
script:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
except: ['triggers', 'master']
yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
except: ['triggers', 'master']
tox-inventory-builder:

.travis.yml.bak Normal file

@@ -0,0 +1,161 @@
sudo: required
services:
- docker
git:
depth: 5
env:
global:
GCE_USER=travis
SSH_USER=$GCE_USER
TEST_ID=$TRAVIS_JOB_NUMBER
CONTAINER_ENGINE=docker
PRIVATE_KEY=$GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID=$GS_KEY
GS_SECRET_ACCESS_KEY=$GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES=1
CLUSTER_MODE=default
BOOTSTRAP_OS=none
matrix:
# Debian Jessie
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=asia-east1-a
CLUSTER_MODE=ha
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=europe-west1-c
CLUSTER_MODE=default
# Centos 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=centos-7
CLOUD_REGION=asia-northeast1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=centos-7
CLOUD_REGION=us-central1-b
CLUSTER_MODE=ha
# Redhat 7
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=rhel-7
CLOUD_REGION=us-east1-c
CLUSTER_MODE=default
# CoreOS stable
#- >-
# KUBE_NETWORK_PLUGIN=weave
# CLOUD_IMAGE=coreos-stable
# CLOUD_REGION=europe-west1-b
# CLUSTER_MODE=ha
# BOOTSTRAP_OS=coreos
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-west1-b
CLUSTER_MODE=default
BOOTSTRAP_OS=coreos
# Extra cases for separated roles
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=rhel-7
CLOUD_REGION=asia-northeast1-b
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=europe-west1-d
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-central1-f
CLUSTER_MODE=separate
BOOTSTRAP_OS=coreos
matrix:
allow_failures:
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
before_install:
# Install Ansible.
- pip install --user ansible
- pip install --user netaddr
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
- pip install --user apache-libcloud==0.20.1
- pip install --user boto==2.9.0 -U
# Load cached docker images
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
cache:
- directories:
- $HOME/.cache/pip
- $HOME/.local
- /var/tmp/releases
before_script:
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
- chmod 400 $HOME/.ssh/id_rsa
- chmod 755 $HOME/.local/bin/ansible-playbook
- $HOME/.local/bin/ansible-playbook --version
- cp tests/ansible.cfg .
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
script:
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Create cluster with netchecker app deployed
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e local_release_dir=/var/tmp/releases
-e deploy_netchecker=true
cluster.yml
# Tests Cases
## Test Master API
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between 2 pods
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
after_script:
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
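Both this Travis file and the GitLab CI file above follow the same provision → deploy → test → tear down sequence. A condensed sketch of that flow, with illustrative values standing in for the env matrix (GCE credential vars omitted):

```shell
# Illustrative values; the real jobs take these from the CI env matrix above
export KUBE_NETWORK_PLUGIN=calico CLOUD_IMAGE=centos-7 CLOUD_REGION=us-central1-b \
       CLUSTER_MODE=default TEST_ID=local-1

# 1. Provision GCE instances and write inventory/inventory.ini
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local \
  -e mode=${CLUSTER_MODE} -e test_id=${TEST_ID} \
  -e cloud_image=${CLOUD_IMAGE} -e cloud_region=${CLOUD_REGION} \
  -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
  -e inventory_path=${PWD}/inventory/inventory.ini

# 2. Deploy the cluster
ansible-playbook -i inventory/inventory.ini -b \
  -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml

# 3. Run the test cases: API server, pod-to-pod network, advanced DNS
for t in 010_check-apiserver 030_check-network 040_check-network-adv; do
  ansible-playbook -i inventory/inventory.ini -b tests/testcases/${t}.yml
done

# 4. Tear down the GCE instances
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local \
  -e mode=${CLUSTER_MODE} -e test_id=${TEST_ID}
```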


@@ -1,16 +0,0 @@
---
extends: default
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
truthy: disable


@@ -1,8 +1,8 @@
![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
## Deploy a production ready kubernetes cluster
##Deploy a production ready kubernetes cluster
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**.
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
- **High available** cluster
@@ -13,16 +13,15 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
To deploy the cluster you can use :
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
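A minimal sketch of the plain-Ansible route; the inventory path and remote user here are assumptions, not fixed by the docs:

```shell
# Deploy with an existing inventory (path assumed); -b escalates to root on the nodes
ansible-playbook -i inventory/inventory.cfg cluster.yml -b -u root
```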
* [Requirements](#requirements)
* [Kubespray vs ...](docs/comparisons.md)
* [Kargo vs ...](docs/comparisons.md)
* [Getting started](docs/getting-started.md)
* [Ansible inventory and tags](docs/ansible.md)
* [Integration with existing ansible repo](docs/integration.md)
* [Deployment data variables](docs/vars.md)
* [DNS stack](docs/dns-stack.md)
* [HA mode](docs/ha-mode.md)
@@ -34,7 +33,6 @@ To deploy the cluster you can use :
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
* [vSphere](docs/vsphere.md)
* [Large deployments](docs/large-deployments.md)
* [Upgrades basics](docs/upgrades.md)
* [Roadmap](docs/roadmap.md)
@@ -52,19 +50,16 @@ Note: Upstart/SysV init based OS types are not supported.
Versions of supported components
--------------------------------
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
[weave](http://weave.works/) v2.0.1 <br>
[docker](https://www.docker.com/) v1.13 (see note)<br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.12.5 <br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin.
Note 2: rkt support as docker alternative is limited to control plane (etcd and
Note: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
plugins' related OS services. Also note, only one of the supported network
plugins can be deployed for a given single cluster.
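The pinning that note suggests might look like this (package name `docker-engine` assumed for the docker 1.12/1.13 era):

```shell
# Debian/Ubuntu: hold the docker package at its installed version
apt-mark hold docker-engine

# RHEL/CentOS: lock the version via the yum versionlock plugin
yum install -y yum-plugin-versionlock
yum versionlock add docker-engine
```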
@@ -72,19 +67,16 @@ plugins can be deployed for a given single cluster.
Requirements
--------------
* **Ansible v2.3 (or newer) and python-netaddr is installed on the machine
that will run Ansible commands**
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
* The target servers must have **access to the Internet** in order to pull docker images.
* The target servers are configured to allow **IPv4 forwarding**.
* **Your ssh key must be copied** to all the servers part of your inventory.
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should disable your firewall.
* The target servers are configured to allow **IPv4 forwarding**.
* **Copy your ssh keys** to all the servers part of your inventory.
* **Ansible v2.2 (or newer) and python-netaddr**
## Network plugins
You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 4 network plugins. (default: `flannel` with vxlan backend)
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
@@ -92,30 +84,18 @@ You can choose between 4 network plugins. (default: `calico`, except Vagrant use
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
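For example, to override the default plugin for one deployment, passing the variable the same way the CI files above do (inventory path assumed):

```shell
ansible-playbook -i inventory/inventory.cfg cluster.yml -b -e kube_network_plugin=weave
```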
## Community docs and resources
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
## Tools and projects on top of Kubespray
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
## CI Tests
![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
CI/end-to-end tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
See the [test matrix](docs/test_cases.md) for details.


@@ -1,16 +1,16 @@
# Release Process
The Kubespray Project is released on an as-needed basis. The process is as follows:
The Kargo Project is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [OWNERS](OWNERS) must LGTM this release
2. At least on of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
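A sketch of step 3, assuming the tag goes to the `origin` remote and a GPG key is configured for signing:

```shell
VERSION=v2.1.0        # illustrative version number
git tag -s $VERSION   # opens an editor; paste the changelog as the tag message
git push origin $VERSION
```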
## Major/minor releases, merge freezes and milestones
* Kubespray does not maintain stable branches for releases. Releases are tags, not
* Kargo does not maintain stable branches for releases. Releases are tags, not
branches, and there are no backports. Therefore, there is no need for merge
freezes as well.
@@ -20,21 +20,24 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
support lifetime, which ends once the milestone closed. Then only a next major
or minor release can be done.
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer versions are not supported and not tested for the given release.
* There is no unstable releases and no APIs, thus Kubespray doesn't follow
* There is no unstable releases and no APIs, thus Kargo doesn't follow
[semver](http://semver.org/). Every version describes only a stable release.
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kubespray scope and are up to the components' teams to deal with and
considered out of Kargo scope and are up to the components' teams to deal with and
document.
* Minor releases can change components' versions, but not the major ``kube_version``.
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
foo
foo
foo

Vagrantfile vendored

@@ -7,16 +7,6 @@ Vagrant.require_version ">= 1.8.0"
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
SUPPORTED_OS = {
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
"centos" => {box: "bento/centos-7.3", bootstrap_os: "centos", user: "vagrant"},
}
# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
@@ -26,12 +16,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$os = "ubuntu"
# The first three nodes are etcd servers
$etcd_instances = $num_instances
# The first two nodes are masters
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
$local_release_dir = "/vagrant/temp"
$box = "bento/ubuntu-16.04"
host_vars = {}
@@ -39,10 +24,6 @@ if File.exist?(CONFIG)
require CONFIG
end
# All nodes are kube nodes
$kube_node_instances = $num_instances
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
@@ -68,10 +49,7 @@ Vagrant.configure("2") do |config|
# always use Vagrants insecure key
config.ssh.insert_key = false
config.vm.box = $box
if SUPPORTED_OS[$os].has_key? :box_url
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
end
config.ssh.username = SUPPORTED_OS[$os][:user]
# plugin conflict
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
@@ -102,10 +80,6 @@ Vagrant.configure("2") do |config|
end
end
$shared_folders.each do |src, dst|
config.vm.synced_folder src, dst
end
config.vm.provider :virtualbox do |vb|
vb.gui = $vm_gui
vb.memory = $vm_memory
@@ -114,15 +88,12 @@ Vagrant.configure("2") do |config|
ip = "#{$subnet}.#{i+100}"
host_vars[vm_name] = {
"ip": ip,
"flannel_interface": ip,
"flannel_backend_type": "host-gw",
"local_release_dir" => $local_release_dir,
"download_run_once": "False",
# Override the default 'calico' with flannel.
# inventory/group_vars/k8s-cluster.yml
"kube_network_plugin": "flannel",
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os]
"ip" => ip,
#"access_ip" => ip,
"flannel_interface" => ip,
"flannel_backend_type" => "host-gw",
"local_release_dir" => "/vagrant/temp",
"download_run_once" => "False"
}
config.vm.network :private_network, ip: ip
@@ -141,9 +112,12 @@ Vagrant.configure("2") do |config|
ansible.host_vars = host_vars
#ansible.tags = ['download']
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
"kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
# The first three nodes should be etcd servers
"etcd" => ["#{$instance_name_prefix}-0[1:3]"],
# The first two nodes should be masters
"kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
# all nodes should be kube nodes
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
"k8s-cluster:children" => ["kube-master", "kube-node"],
}
end
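The `CONFIG` file loaded near the top of the Vagrantfile (`vagrant/config.rb`) lets these defaults be overridden without editing the Vagrantfile itself; a hypothetical override, written from the shell:

```shell
# Hypothetical vagrant/config.rb; variable names match the defaults above
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$num_instances = 5
$vm_memory = 2048
$os = "coreos-stable"
EOF
vagrant up
```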


@@ -1,7 +1,6 @@
[ssh_connection]
pipelining=True
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
host_key_checking=False
@@ -10,5 +9,3 @@ fact_caching = jsonfile
fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles
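The commented `ssh_args` variants wire in the `ssh-bastion.conf` generated by the bastion-ssh-config role (note the `/ssh-bastion.conf` entry in `.gitignore` above). One way to enable it for a single run without editing the file, as a sketch:

```shell
# ANSIBLE_SSH_ARGS overrides ssh_args from ansible.cfg for this invocation only
ANSIBLE_SSH_ARGS="-F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m" \
  ansible-playbook -i inventory/inventory.cfg cluster.yml -b
```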


@@ -2,104 +2,64 @@
- hosts: localhost
gather_facts: False
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- bastion-ssh-config
tags: [localhost, bastion]
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
gather_facts: false
vars:
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false
roles:
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- bootstrap-os
tags:
- bootstrap-os
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- { role: rkt, tags: rkt, when: "'rkt' in [ etcd_deployment_type, kubelet_deployment_type ]" }
- hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
- hosts: etcd:!k8s-cluster
any_errors_fatal: true
roles:
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
- hosts: etcd
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
- { role: etcd, tags: etcd }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- hosts: etcd:k8s-cluster:vault
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd }
- { role: kubernetes/node, tags: node }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/master, tags: master }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
- { role: network_plugin, tags: network }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: kubernetes/master, tags: master }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes/client, tags: client }
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
- hosts: kube-master[0]
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
any_errors_fatal: true
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps, tags: apps }


@@ -32,7 +32,8 @@ Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
@@ -52,7 +53,7 @@ The Kubernetes team does not condone any statements by speakers contrary to thes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.
Please bring any concerns to the immediate attention of Kubernetes event staff.
Please bring any concerns to to the immediate attention of Kubernetes event staff
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]()


@@ -1,27 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}


@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}


@@ -1,45 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}


@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}


@@ -1,61 +0,0 @@
#!/usr/bin/env python
import boto3
import os
import argparse
import json
class SearchEC2Tags(object):
def __init__(self):
self.parse_args()
if self.args.list:
self.search_tags()
if self.args.host:
data = {}
print json.dumps(data, indent=2)
def parse_args(self):
##Check if VPC_VISIBILITY is set, if not default to private
if "VPC_VISIBILITY" in os.environ:
self.vpc_visibility = os.environ['VPC_VISIBILITY']
else:
self.vpc_visibility = "private"
##Support --list and --host flags. We largely ignore the host one.
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def search_tags(self):
hosts = {}
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
if self.vpc_visibility == "public":
hosts[group].append(instance.public_dns_name)
hosts['_meta']['hostvars'][instance.public_dns_name] = {
'ansible_ssh_host': instance.public_ip_address
}
else:
hosts[group].append(instance.private_dns_name)
hosts['_meta']['hostvars'][instance.private_dns_name] = {
'ansible_ssh_host': instance.private_ip_address
}
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
print json.dumps(hosts, sort_keys=True, indent=2)
SearchEC2Tags()
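This script implements Ansible's dynamic-inventory contract (`--list` returns all groups, `--host` per-host vars) and selects instances by their `kubespray-role` tag. A usage sketch; the script path is assumed for illustration:

```shell
# REGION and VPC_VISIBILITY are read by the script from the environment
chmod +x contrib/aws_inventory/kubespray-aws-inventory.py
REGION=us-west-2 VPC_VISIBILITY=public \
  ansible-playbook -i contrib/aws_inventory/kubespray-aws-inventory.py cluster.yml -b
```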


@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
## Status
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course).
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).
## Requirements
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
## Generating an inventory for kubespray
## Generating an inventory for kargo
After you have applied the templates, you can generate an inventory with this call:
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
$ ./generate-inventory.sh <resource_group_name>
```
It will create the file ./inventory which can then be used with kubespray, e.g.:
It will create the file ./inventory which can then be used with kargo, e.g.:
```shell
$ cd kubespray-root-dir
$ cd kargo-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
```


@@ -9,18 +9,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-templates.yml
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
else
echo "Azure cli not found"
fi
ansible-playbook generate-templates.yml
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP


@@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP


@@ -9,10 +9,6 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
fi
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete


@@ -1,14 +0,0 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete


@@ -8,11 +8,5 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
# check if azure cli 2.0 exists else use azure cli 1.0
if az &>/dev/null; then
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
else
echo "Azure cli not found"
fi
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"


@@ -1,5 +0,0 @@
---
- hosts: localhost
gather_facts: False
roles:
- generate-inventory_2


@@ -1,6 +1,5 @@
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
# this name must be globally unique - it will be used as a prefix for azure components
# Due to some Azure limitations, this name must be globally unique
cluster_name: example
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
@@ -18,29 +17,10 @@ minions_os_disk_size: 1000
admin_username: devops
admin_password: changeme
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
# Disable using ssh using password. Change it to false to allow to connect to ssh by password
disablePasswordAuthentication: true
# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
# Azure loadbalancer port to use to access your cluster
kube_apiserver_port: 6443
# Azure Netwoking and storage naming to use with inventory/all.yml
#azure_virtual_network_name: KubeVNET
#azure_subnet_admin_name: ad-subnet
#azure_subnet_masters_name: master-subnet
#azure_subnet_minions_name: minion-subnet
#azure_route_table_name: routetable
#azure_security_group_name: secgroup
# Storage types available are: "Standard_LRS","Premium_LRS"
#azure_storage_account_type: Standard_LRS


@@ -8,4 +8,4 @@
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"


@@ -1,16 +0,0 @@
---
- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"


@@ -1,34 +0,0 @@
{% for vm in vm_ip_list %}
{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% endif %}
{% endfor %}
[kube-master]
{% for vm in vm_roles_list %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_roles_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube-node]
{% for vm in vm_roles_list %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s-cluster:children]
kube-node
kube-master
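A hypothetical rendering of this template, assuming one VM tagged with the kube-master and etcd roles and one tagged kube-node (names and addresses are invented for illustration), would be:

```
master-0 ansible_ssh_host=52.174.10.11 ip=10.0.4.4
minion-0 ansible_ssh_host=52.174.10.12 ip=10.240.0.4

[kube-master]
master-0

[etcd]
master-0

[kube-node]
minion-0

[k8s-cluster:children]
kube-node
kube-master
```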

View File

@@ -1,15 +1,15 @@
apiVersion: "2015-06-15"
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
virtualNetworkName: "KubVNET"
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
subnetAdminName: "ad-subnet"
subnetMastersName: "master-subnet"
subnetMinionsName: "minion-subnet"
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
routeTableName: "routetable"
securityGroupName: "secgroup"
nameSuffix: "{{ cluster_name }}"
nameSuffix: "{{cluster_name}}"
availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"
@@ -33,5 +33,5 @@ imageReference:
imageReferenceJson: "{{imageReference|to_json}}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
storageAccountType: "Standard_LRS"

View File

@@ -62,8 +62,8 @@
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
},
"protocol": "tcp",
"frontendPort": "{{kube_apiserver_port}}",
"backendPort": "{{kube_apiserver_port}}",
"frontendPort": 443,
"backendPort": 443,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 5,
"probe": {
@@ -77,7 +77,7 @@
"name": "kube-api",
"properties": {
"protocol": "tcp",
"port": "{{kube_apiserver_port}}",
"port": 443,
"intervalInSeconds": 5,
"numberOfProbes": 2
}
@@ -193,4 +193,4 @@
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}
}

View File

@@ -92,7 +92,7 @@
"description": "Allow secure kube-api",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "{{kube_apiserver_port}}",
"destinationPortRange": "443",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
@@ -106,4 +106,4 @@
"dependsOn": []
}
]
}
}

View File

@@ -41,7 +41,7 @@ import re
import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
'calico-rr', 'vault']
'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
# Configurable as shell vars end
class KubesprayInventory(object):
class KargoInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config = configparser.ConfigParser(allow_no_value=True,
@@ -250,7 +250,6 @@ class KubesprayInventory(object):
def set_etcd(self, hosts):
for host in hosts:
self.add_host_to_group('etcd', host)
self.add_host_to_group('vault', host)
def load_file(self, files=None):
'''Directly loads JSON, or YAML file to inventory.'''
@@ -338,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
KargoInventory(argv, CONFIG_FILE)
if __name__ == "__main__":
sys.exit(main())
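As a usage sketch, the builder can also be driven programmatically through the constructor shown above; the config file path below is illustrative (the script itself derives it from the environment):

```python
# Minimal sketch, assuming this script is importable as `inventory`.
from inventory import KargoInventory

# Equivalent to running: python3 inventory.py 10.90.3.2 10.90.3.3 10.90.3.4
# The constructor parses the host list and writes the resulting groups
# to the given config file.
KargoInventory(changed_hosts=['10.90.3.2', '10.90.3.3', '10.90.3.4'],
               config_file='inventory/inventory.cfg')
```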

View File

@@ -1,3 +1,3 @@
[metadata]
name = kubespray-inventory-builder
name = kargo-inventory-builder
version = 0.1

View File

@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
sys_mock.exit = mock.Mock()
super(TestInventory, self).setUp()
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
self.inv = inventory.KubesprayInventory()
self.inv = inventory.KargoInventory()
def test_get_ip_from_opts(self):
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"

View File

@@ -1,11 +0,0 @@
# Kubespray on KVM Virtual Machines hypervisor preparation
A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
### User creation
If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.

View File

@@ -1,3 +0,0 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

View File

@@ -1,8 +0,0 @@
---
- hosts: localhost
gather_facts: False
become: yes
vars:
- bootstrap_os: none
roles:
- kvm-setup

View File

@@ -1,46 +0,0 @@
---
- name: Upgrade all packages to the latest version (yum)
yum:
name: '*'
state: latest
when: ansible_os_family == "RedHat"
- name: Install required packages
yum:
name: "{{ item }}"
state: latest
with_items:
- bind-utils
- ntp
when: ansible_os_family == "RedHat"
- name: Install required packages
apt:
upgrade: yes
update_cache: yes
cache_valid_time: 3600
name: "{{ item }}"
state: latest
install_recommends: no
with_items:
- dnsutils
- ntp
when: ansible_os_family == "Debian"
- name: Upgrade all packages to the latest version (apt)
shell: apt-get -o \
Dpkg::Options::=--force-confdef -o \
Dpkg::Options::=--force-confold -q -y \
dist-upgrade
environment:
DEBIAN_FRONTEND: noninteractive
when: ansible_os_family == "Debian"
# Create deployment user if required
- include: user.yml
when: k8s_deployment_user is defined
# Set proper sysctl values
- include: sysctl.yml

View File

@@ -1,46 +0,0 @@
---
- name: Load br_netfilter module
modprobe:
name: br_netfilter
state: present
register: br_netfilter
- name: Add br_netfilter into /etc/modules
lineinfile:
dest: /etc/modules
state: present
line: 'br_netfilter'
when: br_netfilter is defined and ansible_os_family == 'Debian'
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
copy:
dest: /etc/modules-load.d/kubespray.conf
content: |-
### This file is managed by Ansible
br-netfilter
owner: root
group: root
mode: 0644
when: br_netfilter is defined
- name: Enable net.ipv4.ip_forward in sysctl
sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
state: present
reload: yes
- name: Set bridge-nf-call-{arptables,iptables} to 0
sysctl:
name: "{{ item }}"
state: present
value: 0
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
reload: yes
with_items:
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables
- net.bridge.bridge-nf-call-iptables
when: br_netfilter is defined
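On a provisioned host, a quick spot check should show the values these tasks set (exact output formatting may vary by distribution):

```
$ sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 0
```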

View File

@@ -1,46 +0,0 @@
---
- name: Create user {{ k8s_deployment_user }}
user:
name: "{{ k8s_deployment_user }}"
groups: adm
shell: /bin/bash
- name: Ensure that .ssh exists
file:
path: "/home/{{ k8s_deployment_user }}/.ssh"
state: directory
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
- name: Configure sudo for deployment user
copy:
content: |
%{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
dest: "/etc/sudoers.d/55-k8s-deployment"
owner: root
group: root
mode: 0644
- name: Write private SSH key
copy:
src: "{{ k8s_deployment_user_pkey_path }}"
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
mode: 0400
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined
- name: Write public SSH key
shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
> /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
args:
creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
when: k8s_deployment_user_pkey_path is defined
- name: Fix ssh-pub-key permissions
file:
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
mode: 0600
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined

View File

@@ -1,4 +1,4 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
# Deploying a Kargo Kubernetes Cluster with GlusterFS
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can skip the **Using Terraform and Ansible** section.
@@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f
In the same directory as this README file you should find a file named `inventory.example`, which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines are all running Ubuntu):
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kargo root folder and execute (assuming the machines are all running Ubuntu):
```
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
## Using Terraform and Ansible
The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables would look like:
The first step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables would look like:
```
cluster_name = "cluster1"
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
Then, from the kubespray directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
Then, from the kargo directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to set up (for instance, the type of machine for bootstrapping).
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
If you need to destroy the cluster, you can run:
```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```

View File

@@ -1,60 +0,0 @@
%global srcname ansible_kubespray
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
Name: ansible-kubespray
Version: XXX
Release: XXX
Summary: Ansible modules for installing Kubernetes
Group: System Environment/Libraries
License: ASL 2.0
Vendor: Kubespray <smainklh@gmail.com>
Url: https://github.com/kubernetes-incubator/kubespray
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz
BuildArch: noarch
BuildRequires: git
BuildRequires: python2-devel
BuildRequires: python-setuptools
BuildRequires: python-d2to1
BuildRequires: python-pbr
Requires: ansible
Requires: python-jinja2
Requires: python-netaddr
%description
Ansible-kubespray is a set of Ansible modules and playbooks for
installing a Kubernetes cluster. If you have questions, join us
on https://slack.k8s.io, channel '#kubespray'.
%prep
%autosetup -n %{name}-%{upstream_version} -S git
%build
%{__python2} setup.py build
%install
export PBR_VERSION=%{version}
export SKIP_PIP_INSTALL=1
%{__python2} setup.py install --skip-build --root %{buildroot}
%files
%doc README.md
%doc inventory/inventory.example
%config /etc/kubespray/ansible.cfg
%config /etc/kubespray/inventory/group_vars/all.yml
%config /etc/kubespray/inventory/group_vars/k8s-cluster.yml
%license LICENSE
%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info
/usr/local/share/kubespray/roles/
/usr/local/share/kubespray/playbooks/
%defattr(-,root,root)
%changelog

View File

@@ -1,2 +1,2 @@
*.tfstate*
.terraform
inventory

View File

@@ -0,0 +1,261 @@
variable "deploymentName" {
type = "string"
description = "The desired name of your deployment."
}
variable "numControllers" {
type = "string"
description = "Desired # of controllers."
}
variable "numEtcd" {
type = "string"
description = "Desired # of etcd nodes. Should be an odd number."
}
variable "numNodes" {
type = "string"
description = "Desired # of nodes."
}
variable "volSizeController" {
type = "string"
description = "Volume size for the controllers (GB)."
}
variable "volSizeEtcd" {
type = "string"
description = "Volume size for etcd (GB)."
}
variable "volSizeNodes" {
type = "string"
description = "Volume size for nodes (GB)."
}
variable "subnet" {
type = "string"
description = "The subnet in which to put your cluster."
}
variable "securityGroups" {
type = "string"
description = "The sec. groups in which to put your cluster."
}
variable "ami" {
type = "string"
description = "AMI to use for all VMs in cluster."
}
variable "SSHKey" {
type = "string"
description = "SSH key to use for VMs."
}
variable "master_instance_type" {
type = "string"
description = "Size of VM to use for masters."
}
variable "etcd_instance_type" {
type = "string"
description = "Size of VM to use for etcd."
}
variable "node_instance_type" {
type = "string"
description = "Size of VM to use for nodes."
}
variable "terminate_protect" {
type = "string"
default = "false"
}
variable "awsRegion" {
type = "string"
}
provider "aws" {
region = "${var.awsRegion}"
}
variable "iam_prefix" {
type = "string"
description = "Prefix name for IAM profiles"
}
resource "aws_iam_instance_profile" "kubernetes_master_profile" {
name = "${var.iam_prefix}_kubernetes_master_profile"
roles = ["${aws_iam_role.kubernetes_master_role.name}"]
}
resource "aws_iam_role" "kubernetes_master_role" {
name = "${var.iam_prefix}_kubernetes_master_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "kubernetes_master_policy" {
name = "${var.iam_prefix}_kubernetes_master_policy"
role = "${aws_iam_role.kubernetes_master_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_instance_profile" "kubernetes_node_profile" {
name = "${var.iam_prefix}_kubernetes_node_profile"
roles = ["${aws_iam_role.kubernetes_node_role.name}"]
}
resource "aws_iam_role" "kubernetes_node_role" {
name = "${var.iam_prefix}_kubernetes_node_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "kubernetes_node_policy" {
name = "${var.iam_prefix}_kubernetes_node_policy"
role = "${aws_iam_role.kubernetes_node_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
}
]
}
EOF
}
resource "aws_instance" "master" {
count = "${var.numControllers}"
ami = "${var.ami}"
instance_type = "${var.master_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
root_block_device {
volume_size = "${var.volSizeController}"
}
tags {
Name = "${var.deploymentName}-master-${count.index + 1}"
}
}
resource "aws_instance" "etcd" {
count = "${var.numEtcd}"
ami = "${var.ami}"
instance_type = "${var.etcd_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
root_block_device {
volume_size = "${var.volSizeEtcd}"
}
tags {
Name = "${var.deploymentName}-etcd-${count.index + 1}"
}
}
resource "aws_instance" "minion" {
count = "${var.numNodes}"
ami = "${var.ami}"
instance_type = "${var.node_instance_type}"
subnet_id = "${var.subnet}"
vpc_security_group_ids = ["${var.securityGroups}"]
key_name = "${var.SSHKey}"
disable_api_termination = "${var.terminate_protect}"
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
root_block_device {
volume_size = "${var.volSizeNodes}"
}
tags {
Name = "${var.deploymentName}-minion-${count.index + 1}"
}
}
output "kubernetes_master_profile" {
value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
}
output "kubernetes_node_profile" {
value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
}
output "master-ip" {
value = "${join(", ", aws_instance.master.*.private_ip)}"
}
output "etcd-ip" {
value = "${join(", ", aws_instance.etcd.*.private_ip)}"
}
output "minion-ip" {
value = "${join(", ", aws_instance.minion.*.private_ip)}"
}

View File

@@ -0,0 +1,37 @@
variable "SSHUser" {
type = "string"
description = "SSH User for VMs."
}
resource "null_resource" "ansible-provision" {
depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]
##Create Master Inventory
provisioner "local-exec" {
command = "echo \"[kube-master]\" > inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
}
##Create ETCD Inventory
provisioner "local-exec" {
command = "echo \"\n[etcd]\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
}
##Create Nodes Inventory
provisioner "local-exec" {
command = "echo \"\n[kube-node]\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
}
provisioner "local-exec" {
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
}
}
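Given the provisioners above, the generated `inventory` file would look roughly like this (IPs are hypothetical, SSHUser assumed to be `core`):

```
[kube-master]
10.250.192.10 ansible_ssh_user=core

[etcd]
10.250.192.11 ansible_ssh_user=core

[kube-node]
10.250.192.12 ansible_ssh_user=core

[k8s-cluster:children]
kube-node
kube-master
```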

View File

@@ -2,69 +2,27 @@
**Overview:**
This project will create:
* VPC with Public and Private Subnets in # Availability Zones
* Bastion Hosts and NAT Gateways in the Public Subnet
* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
* even distributed over the # of Availability Zones
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
- This will create nodes in a VPC inside of AWS
**Requirements**
- Terraform 0.8.7 or newer
- A dynamic number of masters, etcd, and nodes can be created
- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. We will look into how to work around this later.
**How to Use:**
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
- Export the variables for your Amazon credentials:
```
export AWS_ACCESS_KEY_ID="www"
export AWS_SECRET_ACCESS_KEY="xxx"
export AWS_SSH_KEY_NAME="yyy"
export AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
- Update `contrib/terraform/aws/terraform.tfvars` with your data
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending on whether you exported your AWS credentials
Example:
```commandline
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
```
- Terraform automatically creates an Ansible inventory file called `hosts`, describing the created infrastructure, in the directory `inventory`
- Update contrib/terraform/aws/terraform.tfvars with your data
- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
```commandline
ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
```
- Run with `terraform apply`
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.
Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
**Future Work:**
**Troubleshooting**
***Remaining AWS IAM Instance Profile***:
If the cluster was destroyed without using Terraform, it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command:
```
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```
***Ansible Inventory doesn't get created:***
It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into that file.
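Since the rendered inventory is also declared as a Terraform output named `inventory` (see the outputs file below), regenerating the hosts file by hand should also work:

```commandline
terraform output inventory > inventory/hosts
```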
**Architecture**
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
![AWS Infrastructure with Terraform ](docs/aws_kubespray.png)
- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now; using terraform.py or something similar may make sense in the future.

View File

@@ -1,190 +0,0 @@
terraform {
required_version = ">= 0.8.7"
}
provider "aws" {
access_key = "${var.AWS_ACCESS_KEY_ID}"
secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
region = "${var.AWS_DEFAULT_REGION}"
}
/*
* Calling modules who create the initial AWS VPC / AWS ELB
* and AWS IAM Roles for Kubernetes Deployment
*/
module "aws-vpc" {
source = "modules/vpc"
aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
aws_avail_zones="${var.aws_avail_zones}"
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
}
module "aws-elb" {
source = "modules/elb"
aws_cluster_name="${var.aws_cluster_name}"
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
aws_avail_zones="${var.aws_avail_zones}"
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}"
}
module "aws-iam" {
source = "modules/iam"
aws_cluster_name="${var.aws_cluster_name}"
}
/*
* Create Bastion Instances in AWS
*
*/
resource "aws_instance" "bastion-server" {
ami = "${var.aws_bastion_ami}"
instance_type = "${var.aws_bastion_size}"
count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true
availability_zone = "${element(var.aws_avail_zones,count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
key_name = "${var.AWS_SSH_KEY_NAME}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
Cluster = "${var.aws_cluster_name}"
Role = "bastion-${var.aws_cluster_name}-${count.index}"
}
}
/*
* Create K8s Master and worker nodes and etcd instances
*
*/
resource "aws_instance" "k8s-master" {
ami = "${var.aws_cluster_ami}"
instance_type = "${var.aws_kube_master_size}"
count = "${var.aws_kube_master_num}"
availability_zone = "${element(var.aws_avail_zones,count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
Cluster = "${var.aws_cluster_name}"
Role = "master"
}
}
resource "aws_elb_attachment" "attach_master_nodes" {
count = "${var.aws_kube_master_num}"
elb = "${module.aws-elb.aws_elb_api_id}"
instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}
resource "aws_instance" "k8s-etcd" {
ami = "${var.aws_cluster_ami}"
instance_type = "${var.aws_etcd_size}"
count = "${var.aws_etcd_num}"
availability_zone = "${element(var.aws_avail_zones,count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
key_name = "${var.AWS_SSH_KEY_NAME}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
Cluster = "${var.aws_cluster_name}"
Role = "etcd"
}
}
resource "aws_instance" "k8s-worker" {
ami = "${var.aws_cluster_ami}"
instance_type = "${var.aws_kube_worker_size}"
count = "${var.aws_kube_worker_num}"
availability_zone = "${element(var.aws_avail_zones,count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
Cluster = "${var.aws_cluster_name}"
Role = "worker"
}
}
/*
* Create Kubespray Inventory File
*
*/
data "template_file" "inventory" {
template = "${file("${path.module}/templates/inventory.tpl")}"
vars {
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
}
triggers {
template = "${data.template_file.inventory.rendered}"
}
}

View File

@@ -1,8 +0,0 @@
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"

Binary file not shown (image, 114 KiB)

View File

@@ -1,58 +0,0 @@
resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = "${var.aws_vpc_id}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
}
}
resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = "${var.aws_elb_api_port}"
to_port = "${var.k8s_secure_api_port}"
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.aws-elb.id}"
}
resource "aws_security_group_rule" "aws-allow-api-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.aws-elb.id}"
}
# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = ["${var.aws_subnet_ids_public}"]
security_groups = ["${aws_security_group.aws-elb.id}"]
listener {
instance_port = "${var.k8s_secure_api_port}"
instance_protocol = "tcp"
lb_port = "${var.aws_elb_api_port}"
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8080/"
interval = 30
}
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
tags {
Name = "kubernetes-${var.aws_cluster_name}-elb-api"
}
}

View File

@@ -1,7 +0,0 @@
output "aws_elb_api_id" {
value = "${aws_elb.aws-elb-api.id}"
}
output "aws_elb_api_fqdn" {
value = "${aws_elb.aws-elb-api.dns_name}"
}

View File

@@ -1,28 +0,0 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_vpc_id" {
description = "AWS VPC ID"
}
variable "aws_elb_api_port" {
description = "Port for AWS ELB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = "list"
}
variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets"
type = "list"
}

View File

@@ -1,138 +0,0 @@
#Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
#Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = "${aws_iam_role.kube-master.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}
EOF
}
resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
role = "${aws_iam_role.kube-worker.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}
EOF
}
#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube-master" {
name = "kube_${var.aws_cluster_name}_master_profile"
roles = ["${aws_iam_role.kube-master.name}"]
}
resource "aws_iam_instance_profile" "kube-worker" {
name = "kube_${var.aws_cluster_name}_node_profile"
roles = ["${aws_iam_role.kube-worker.name}"]
}

View File

@@ -1,7 +0,0 @@
output "kube-master-profile" {
value = "${aws_iam_instance_profile.kube-master.name }"
}
output "kube-worker-profile" {
value = "${aws_iam_instance_profile.kube-worker.name }"
}

View File

@@ -1,3 +0,0 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}

View File

@@ -1,138 +0,0 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = "${var.aws_vpc_cidr_block}"
#DNS Related Entries
enable_dns_support = true
enable_dns_hostnames = true
tags {
Name = "kubernetes-${var.aws_cluster_name}-vpc"
}
}
resource "aws_eip" "cluster-nat-eip" {
count = "${length(var.aws_cidr_subnets_public)}"
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
}
}
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
count="${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
}
}
resource "aws_nat_gateway" "cluster-nat-gateway" {
count = "${length(var.aws_cidr_subnets_public)}"
allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
count="${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
}
}
#Routing in VPC
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
}
tags {
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
}
}
resource "aws_route_table" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
}
tags {
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
}
}
resource "aws_route_table_association" "kubernetes-public" {
count = "${length(var.aws_cidr_subnets_public)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
route_table_id = "${aws_route_table.kubernetes-public.id}"
}
resource "aws_route_table_association" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags {
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
}
}
resource "aws_security_group_rule" "allow-all-ingress" {
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks= ["${var.aws_vpc_cidr_block}"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-all-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-ssh-connections" {
type = "ingress"
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.kubernetes.id}"
}

View File

@@ -1,16 +0,0 @@
output "aws_vpc_id" {
value = "${aws_vpc.cluster-vpc.id}"
}
output "aws_subnet_ids_private" {
value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
}
output "aws_subnet_ids_public" {
value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
}
output "aws_security_group" {
value = ["${aws_security_group.kubernetes.*.id}"]
}

View File

@@ -1,24 +0,0 @@
variable "aws_vpc_cidr_block" {
description = "CIDR Blocks for AWS VPC"
}
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_avail_zones" {
description = "AWS Availability Zones Used"
type = "list"
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones"
type = "list"
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones"
type = "list"
}

View File

@@ -1,24 +0,0 @@
output "bastion_ip" {
value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
}
output "masters" {
value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
}
output "workers" {
value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
}
output "etcd" {
value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}
output "aws_elb_api_fqdn" {
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}
output "inventory" {
value = "${data.template_file.inventory.rendered}"
}

View File

@@ -1,28 +0,0 @@
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}
[kube-master]
${list_master}
[kube-node]
${list_node}
[etcd]
${list_etcd}
[k8s-cluster:children]
kube-node
kube-master
[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
${loadbalancer_apiserver_address}
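With the variables wired up in the main Terraform file, a rendered inventory would look roughly like this (names and addresses are hypothetical):

```
kubernetes-devtest-master0 ansible_ssh_host=10.250.192.10
kubernetes-devtest-worker0 ansible_ssh_host=10.250.193.10
kubernetes-devtest-etcd0 ansible_ssh_host=10.250.192.20
bastion ansible_ssh_host=52.10.1.5

[kube-master]
kubernetes-devtest-master0

[kube-node]
kubernetes-devtest-worker0

[etcd]
kubernetes-devtest-etcd0

[k8s-cluster:children]
kube-node
kube-master

[k8s-cluster:vars]
apiserver_loadbalancer_domain_name="kubernetes-elb-devtest.eu-central-1.elb.amazonaws.com"
loadbalancer_apiserver.port=6443
loadbalancer_apiserver.address=34.212.228.77
```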

View File

@@ -1,32 +1,22 @@
#Global Vars
aws_cluster_name = "devtest"
deploymentName="test-kube-deploy"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["us-west-2a","us-west-2b"]
numControllers="2"
numEtcd="3"
numNodes="2"
#Bastion Host
aws_bastion_ami = "ami-db56b9a3"
aws_bastion_size = "t2.medium"
volSizeController="20"
volSizeEtcd="20"
volSizeNodes="20"
awsRegion="us-west-2"
subnet="subnet-xxxxx"
ami="ami-32a85152"
securityGroups="sg-xxxxx"
SSHUser="core"
SSHKey="my-key"
#Kubernetes Cluster
master_instance_type="m3.xlarge"
etcd_instance_type="m3.xlarge"
node_instance_type="m3.xlarge"
aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_cluster_ami = "ami-db56b9a3"
#Settings AWS ELB
aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
terminate_protect="false"

View File

@@ -1,32 +0,0 @@
#Global Vars
aws_cluster_name = "devtest"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_cluster_ami = "ami-903df7ff"
#Settings AWS ELB
aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"

View File

@@ -1,101 +0,0 @@
variable "AWS_ACCESS_KEY_ID" {
description = "AWS Access Key"
}
variable "AWS_SECRET_ACCESS_KEY" {
description = "AWS Secret Key"
}
variable "AWS_SSH_KEY_NAME" {
description = "Name of the SSH keypair to use in AWS."
}
variable "AWS_DEFAULT_REGION" {
description = "AWS Region"
}
//General Cluster Settings
variable "aws_cluster_name" {
description = "Name of AWS Cluster"
}
//AWS VPC Variables
variable "aws_vpc_cidr_block" {
description = "CIDR Block for VPC"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = "list"
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability Zones"
type = "list"
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability Zones"
type = "list"
}
//AWS EC2 Settings
variable "aws_bastion_ami" {
description = "AMI ID for Bastion Host in chosen AWS Region"
}
variable "aws_bastion_size" {
description = "EC2 Instance Size of Bastion Host"
}
/*
* AWS EC2 Settings
* The number should be divisible by the number of
* AWS Availability Zones used, without a remainder.
*/
variable "aws_kube_master_num" {
description = "Number of Kubernetes Master Nodes"
}
variable "aws_kube_master_size" {
description = "Instance size of Kube Master Nodes"
}
variable "aws_etcd_num" {
description = "Number of etcd Nodes"
}
variable "aws_etcd_size" {
description = "Instance size of etcd Nodes"
}
variable "aws_kube_worker_num" {
description = "Number of Kubernetes Worker Nodes"
}
variable "aws_kube_worker_size" {
description = "Instance size of Kubernetes Worker Nodes"
}
variable "aws_cluster_ami" {
description = "AMI ID for Kubernetes Cluster"
}
/*
* AWS ELB Settings
*
*/
variable "aws_elb_api_port" {
description = "Port for AWS ELB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "loadbalancer_apiserver_address" {
description= "Bind Address for ELB of K8s API Server"
}

View File

@@ -1 +0,0 @@
../../inventory/group_vars

View File

@@ -11,7 +11,7 @@ services.
There are some assumptions made to try and ensure it will work on your openstack cluster.
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least 1 floating IP, which needs to be used on a master. If using more than one, at least one should be on a master for bastions to work properly.
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least 1 floating IP, which we would suggest using on a master.
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled
@@ -36,8 +36,6 @@ Ensure your OpenStack **Identity v2** credentials are loaded in environment vari
$ source ~/.stackrc
```
> You must set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI
You will need two networks before installing, an internal network and
an external (floating IP Pool) network. The internal network can be shared, as
we use security groups to provide network segregation. Due to the many
@@ -75,9 +73,7 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
##### Alternative: etcd inside masters
If you want to provision master or node VMs that don't use floating ips and where etcd is inside masters, write a `my-terraform-vars.tfvars` file, for example:
If you want to provision master or node VMs that don't use floating ips, write a `my-terraform-vars.tfvars` file, for example:
```
number_of_k8s_masters = "1"
@@ -87,32 +83,10 @@ number_of_k8s_nodes = "0"
```
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
##### Alternative: etcd on separate machines
If you want to provision master or node VMs that don't use floating ips and where **etcd is on separate nodes from Kubernetes masters**, write a `my-terraform-vars.tfvars` file, for example:
```
number_of_etcd = "3"
number_of_k8s_masters = "0"
number_of_k8s_masters_no_etcd = "1"
number_of_k8s_masters_no_floating_ip = "0"
number_of_k8s_masters_no_floating_ip_no_etcd = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "2"
flavor_k8s_node = "desired-flavor-id"
flavor_k8s_master = "desired-flavor-id"
flavor_etcd = "desired-flavor-id"
```
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy), two VMs as nodes with floating ips, one VM as node without floating ip and three VMs for etcd.
##### Alternative: add GlusterFS
Additionally, the terraform-based installation now supports provisioning a GlusterFS shared file system on a separate set of VMs, running either a Debian- or RedHat-based image. To enable this, you need to add the following variables to your `my-terraform-vars.tfvars`:
```
# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
# Flavour depends on your openstack installation, you can get available flavours through `nova list-flavors`
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
# This is the name of an image already available in your openstack installation.
image_gfs = "Ubuntu 15.10"
@@ -125,46 +99,6 @@ ssh_user_gfs = "ubuntu"
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs need to be either Debian- or RedHat-based, since Container Linux by CoreOS cannot serve GlusterFS, but it can connect to it through binaries available in hyperkube v1.4.3_coreos.0 or higher.
# Configure Cluster variables
Edit `inventory/group_vars/all.yml`:
- Set variable **bootstrap_os** according to the selected image
```
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: coreos
```
- **bin_dir**
```
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Container Linux by CoreOS:
bin_dir: /opt/bin
```
- and **cloud_provider**
```
cloud_provider: openstack
```
Edit `inventory/group_vars/k8s-cluster.yml`:
- Set variable **kube_network_plugin** according to the selected networking plugin
```
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```
> flannel works out-of-the-box
> calico requires allowing the service and pod subnets on the corresponding OpenStack Neutron ports
- Set variable **resolvconf_mode**
```
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
# Provision a Kubernetes Cluster on OpenStack
@@ -222,49 +156,6 @@ Deploy kubernetes:
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```
# Set up local kubectl
1. Install kubectl on your workstation:
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
2. Add a route to the internal IP of the master node (if needed):
```
sudo route add [master-internal-ip] gw [router-ip]
```
or
```
sudo route add -net [internal-subnet]/24 gw [router-ip]
```
3. List Kubernetes certs&keys:
```
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get admin's certs&key:
```
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443
6. Configure kubectl:
```
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
--certificate-authority=ca.pem
kubectl config set-credentials default-admin \
--certificate-authority=ca.pem \
--client-key=admin-key.pem \
--client-certificate=admin.pem
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
kubectl config use-context default-system
```
7. Check it:
```
kubectl version
```
# What's next
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
# clean up:
```

View File

@@ -1 +0,0 @@
../../../inventory/group_vars

View File

@@ -0,0 +1 @@
../../../../inventory/group_vars/all.yml

View File

@@ -1,5 +1,5 @@
resource "openstack_networking_floatingip_v2" "k8s_master" {
count = "${var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd}"
count = "${var.number_of_k8s_masters}"
pool = "${var.floatingip_pool}"
}
@@ -68,49 +68,11 @@ resource "openstack_compute_instance_v2" "k8s_master" {
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.number_of_k8s_masters_no_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}" ]
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index + var.number_of_k8s_masters)}"
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
}
}
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.number_of_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,vault,no-floating"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
@@ -125,34 +87,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
"${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
@@ -166,7 +107,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,vault"
kubespray_groups = "kube-node,k8s-cluster"
}
}
@@ -182,10 +123,10 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
kubespray_groups = "kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}

View File

@@ -6,22 +6,10 @@ variable "number_of_k8s_masters" {
default = 2
}
variable "number_of_k8s_masters_no_etcd" {
default = 2
}
variable "number_of_etcd" {
default = 2
}
variable "number_of_k8s_masters_no_floating_ip" {
default = 2
}
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
default = 2
}
variable "number_of_k8s_nodes" {
default = 1
}
@@ -71,10 +59,6 @@ variable "flavor_k8s_node" {
default = 3
}
variable "flavor_etcd" {
default = 3
}
variable "flavor_gfs_node" {
default = 3
}

View File

@@ -8,7 +8,7 @@ The inventory is composed of 3 groups:
* **kube-node** : list of kubernetes nodes where the pods will run.
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
* **etcd**: list of server to compose the etcd server. you should have at least 3 servers for failover purposes.
Note: do not modify the children of _k8s-cluster_, like putting
the _etcd_ group into the _k8s-cluster_, unless you are certain
@@ -27,7 +27,7 @@ not _kube-node_.
There are also two special groups:
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
* **bastion** : configure a bastion host if your nodes are not directly reachable
Below is a complete inventory example:
@@ -67,33 +67,31 @@ Group vars and overriding variables precedence
----------------------------------------------
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
Optional variables are located in the `inventory/group_vars/all.yml`.
Mandatory variables that are common for at least one role (or a node group) can be found in the
`inventory/group_vars/k8s-cluster.yml`.
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use
the `-e ` runtime flags (the simplest way) or other layers described in the docs.
Kubespray uses only a few layers to override things (or expect them to
Kargo uses only a few layers to override things (or expect them to
be overridden for roles):
Layer | Comment
------|--------
**role defaults** | provides best UX to override things for Kubespray deployments
**role defaults** | provides best UX to override things for Kargo deployments
inventory vars | Unused
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
inventory host_vars | Unused
playbook group_vars | Unused
playbook group_vars | Unuses
playbook host_vars | Unused
**host facts** | Kubespray overrides for internal roles' logic, like state flags
**host facts** | Kargo overrides for internal roles' logic, like state flags
play vars | Unused
play vars_prompt | Unused
play vars_files | Unused
registered vars | Unused
set_facts | Kubespray overrides those, for some places
set_facts | Kargo overrides those, for some places
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
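For illustration, a minimal sketch of that highest-precedence layer in action; the file name `foo.yml` and the variable values are only examples, not recommended settings:

```
# Illustrative only: collect overrides in a YAML file and pass it as extra vars,
# which take precedence over every other layer listed above.
cat > foo.yml <<EOF
kube_version: v1.4.6
kube_network_plugin: calico
EOF
ansible-playbook -i inventory/inventory.cfg cluster.yml -b -v -e @foo.yml
```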
@@ -124,12 +122,12 @@ The following tags are defined in playbooks:
| k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys
| kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring static pod kube-controller-manager
| kube-apiserver | Configuring self-hosted kube-apiserver
| kube-controller-manager | Configuring self-hosted kube-controller-manager
| kubectl | Installing kubectl and bash completion
| kubelet | Configuring kubelet service
| kube-proxy | Configuring static pod kube-proxy
| kube-scheduler | Configuring static pod kube-scheduler
| kube-proxy | Configuring self-hosted kube-proxy
| kube-scheduler | Configuring self-hosted kube-scheduler
| localhost | Special steps for the localhost (ansible runner)
| master | Configuring K8s master node role
| netchecker | Installing netchecker K8s app
@@ -162,7 +160,7 @@ ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags
And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
```
ansible-playbook -i inventory/inventory.ini cluster.yml \
ansible-playbook -i inventory/inventory.ini cluster.yaml \
-e download_run_once=true -e download_localhost=true \
--tags download --skip-tags upload,upgrade
```

View File

@@ -1,22 +0,0 @@
Atomic host bootstrap
=====================
Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.
Note: Flannel is the only plugin that has currently been tested with atomic
### Vagrant
* For bootstrapping with Vagrant, use box centos/atomic-host
* Update the Vagrantfile variable `local_release_dir` to `/var/vagrant/temp`.
* Update `vm_memory = 2048` and `vm_cpus = 2`
* Networking on vagrant hosts has to be brought up manually once they are booted.
```
vagrant ssh
sudo /sbin/ifup enp0s8
```
* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
Then you can proceed to [cluster deployment](#run-deployment)

View File

@@ -3,58 +3,8 @@ AWS
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
You can now create your cluster!
### Dynamic Inventory ###
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
This will produce an inventory that is passed into Ansible that looks like the following:
```
{
"_meta": {
"hostvars": {
"ip-172-31-3-xxx.us-east-2.compute.internal": {
"ansible_ssh_host": "172.31.3.xxx"
},
"ip-172-31-8-xxx.us-east-2.compute.internal": {
"ansible_ssh_host": "172.31.8.xxx"
}
}
},
"etcd": [
"ip-172-31-3-xxx.us-east-2.compute.internal"
],
"k8s-cluster": {
"children": [
"kube-master",
"kube-node"
]
},
"kube-master": [
"ip-172-31-3-xxx.us-east-2.compute.internal"
],
"kube-node": [
"ip-172-31-8-xxx.us-east-2.compute.internal"
]
}
```
Guide:
- Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:
```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
```
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
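Assembled from the steps above, the full invocation might look like the following sketch (the keys and region are the placeholders from this guide):

```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
# public-facing instances: pass public IPs and DNS names into the inventory
VPC_VISIBILITY="public" ansible-playbook -i inventory/kubespray-aws-inventory.py cluster.yml -b -v
```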

View File

@@ -1,7 +1,7 @@
Azure
===============
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
@@ -49,8 +49,8 @@ This is the AppId from the last command
- Create the role assignment with:
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
## Provisioning Azure with Resource Group Templates
You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)

View File

@@ -96,7 +96,7 @@ You need to edit your inventory and add:
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
Here's an example of Kubespray inventory with route reflectors:
Here's an example of Kargo inventory with route reflectors:
```
[all]
@@ -145,27 +145,9 @@ cluster_id="1.0.0.1"
The inventory above will deploy the following topology assuming that calico's
`global_as_num` is set to `65400`:
![Image](figures/kubespray-calico-rr.png?raw=true)
##### Optional : Define default endpoint to host action
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
To re-define default action please set the following variable in your inventory:
```
calico_endpoint_to_host_action: "ACCEPT"
```
![Image](figures/kargo-calico-rr.png?raw=true)
Cloud providers configuration
=============================
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
##### Optional : Ignore kernel's RPF check setting
By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:
```
calico_node_ignorelooserpf: true
```

View File

@@ -3,17 +3,17 @@ Cloud providers
#### Provisioning
You can use kubespray-cli to start new instances on cloud providers
You can use kargo-cli to start new instances on cloud providers
here's an example
```
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```
#### Deploy kubernetes
With kubespray-cli
With kargo-cli
```
kubespray deploy [--aws|--gce] -u admin
kargo deploy [--aws|--gce] -u admin
```
Or ansible-playbook command
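The command itself is not shown in this hunk; based on the other examples in these docs, a plain invocation would look roughly like the sketch below (adjust the remote user and inventory path to your setup):

```
ansible-playbook -u admin -i inventory/inventory.cfg cluster.yml -b -v
```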

View File

@@ -1,25 +1,25 @@
Kubespray vs [Kops](https://github.com/kubernetes/kops)
Kargo vs [Kops](https://github.com/kubernetes/kops)
---------------
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
Kargo runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration
itself, and as such is less flexible in deployment platforms. For people with
familiarity with Ansible, existing Ansible deployments or the desire to run a
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future.
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------
Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it may have been named a "Kubernetes cluster operator". Kubespray however,
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
it would've likely been named a "Kubernetes cluster operator". Kargo however,
does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
to adopt kubeadm as a tool in order to consume life cycle management domain
knowledge from it and offload generic OS configuration things from it, which
hopefully benefits both sides.

View File

@@ -1,20 +1,24 @@
CoreOS bootstrap
===============
Example with **kubespray-cli**:
Example with **kargo-cli**:
```
kubespray deploy --gce --coreos
kargo deploy --gce --coreos
```
Or with Ansible:
Before running the cluster playbook you must satisfy the following requirements:
General CoreOS Pre-Installation Notes:
- You should set the bootstrap_os variable to `coreos`
- Ensure that the bin_dir is set to `/opt/bin`
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
* On each CoreOS node a writable directory **/opt/bin** (~400M disk space)
* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`
* run the Python bootstrap playbook
```
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
```
Then you can proceed to [cluster deployment](#run-deployment)

View File

@@ -1,7 +1,7 @@
K8s DNS stack by Kubespray
K8s DNS stack by Kargo
======================
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
DNS modes supported by Kubespray
DNS modes supported by kargo
============================
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
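Both knobs are ordinary inventory variables. A sketch of a group vars override, using the documented defaults as values:

```
# inventory/group_vars/all.yml - illustrative; these are the documented defaults
dns_mode: dnsmasq_kubedns
resolvconf_mode: docker_dns
```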
## dns_mode
``dns_mode`` configures how Kubespray will set up cluster DNS. There are three modes available:
``dns_mode`` configures how kargo will set up cluster DNS. There are three modes available:
#### dnsmasq_kubedns (default)
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
@@ -67,7 +67,7 @@ This does not install any of dnsmasq and kubedns/skydns. This basically disables
leaves you with a non-functional cluster.
## resolvconf_mode
``resolvconf_mode`` configures how Kubespray will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
``resolvconf_mode`` configures how kargo will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:
#### docker_dns (default)
@@ -100,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
servers, which in turn will forward queries to the system nameserver if required.
#### host_resolvconf
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
@@ -120,7 +120,7 @@ cluster service names.
Limitations
-----------
* Kubespray does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
* Kargo does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
not answer with authority to arbitrary recursive resolvers. This task is left
for the future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
for details.

View File

@@ -1,7 +1,7 @@
Downloading binaries and containers
===================================
Kubespray supports several download/upload modes. The default is:
Kargo supports several download/upload modes. The default is:
* Each node downloads binaries and container images on its own, which is
``download_run_once: False``.
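As a sketch, the alternative mode can be enabled in group vars (equivalent to the `-e download_run_once=true -e download_localhost=true` runtime flags used in the ansible guide above):

```
# illustrative group vars override
download_run_once: true
download_localhost: true
```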

View File

(binary image diff: before and after, 40 KiB each)

View File

@@ -23,6 +23,13 @@ ip a show dev flannel.1
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge ip in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its ip address
```

View File

@@ -1,44 +1,44 @@
Getting started
===============
The easiest way to run the deployment is to use the **kubespray-cli** tool.
Complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
The easiest way to run the deployment is to use the **kargo-cli** tool.
Complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
Here is a simple example on AWS:
* Create instances and generate the inventory
```
kubespray aws --instances 3
kargo aws --instances 3
```
* Run the deployment
```
kubespray deploy --aws -u centos -n calico
kargo deploy --aws -u centos -n calico
```
Building your own inventory
---------------------------
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
Ansible inventory can be stored in 3 formats: YAML, JSON, or inifile. There is
an example inventory located
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
You can use an
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
to create or modify an Ansible inventory. Currently, it is limited in
functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
support creating inventory files for large clusters as well. It now supports
functionality and is only used for making a basic Kargo cluster, but it does
support creating large clusters. It now supports
separated ETCD and Kubernetes master roles from node role if the size exceeds a
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` for more information.
certain threshold. Run inventory.py help for more information.
Example inventory generator usage:
```
cp -r inventory my_inventory
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
```
Starting custom deployment
@@ -47,71 +47,10 @@ Starting custom deployment
Once you have an inventory, you may want to customize deployment data vars
and start the deployment:
**IMPORTANT: Edit my_inventory/group_vars/*.yaml to override data vars**
```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
# Edit my_inventory/group_vars/*.yaml to override data vars
ansible-playbook -i my_inventory/inventory.cfg cluster.yaml -b -v \
--private-key=~/.ssh/private_key
```
See more details in the [ansible guide](ansible.md).
Adding nodes
------------
You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
```
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
--private-key=~/.ssh/private_key
```
Connecting to Kubernetes
------------------------
By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](ha.md).
Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One could generate a kubeconfig based on one installed
kube-master hosts (needs improvement) or connect with a username and password.
By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself.
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
Accessing Kubernetes Dashboard
------------------------------
If the variable `dashboard_enabled` is set (default is true), then you can
access the Kubernetes Dashboard at the following URL:
https://kube:_kube-password_@_host_:6443/ui/
To see the password, refer to the section above, titled *Connecting to
Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
(when enabled).
Accessing Kubernetes API
------------------------
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
host and can optionally be configured on your ansible host by setting
`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
admin.conf will appear in the artifacts/ directory after deployment. You can
see a list of nodes by running the following commands:
```
cd artifacts/
./kubectl --kubeconfig admin.conf get nodes
```
If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
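A minimal sketch of that optional copy step; the destination paths are just a common convention, not something fixed by these playbooks:

```
# run from the repo root; paths are illustrative
cp artifacts/kubectl /usr/local/bin/kubectl
mkdir -p ~/.kube && cp artifacts/admin.conf ~/.kube/config
```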

View File

@@ -11,31 +11,38 @@ achieve the same goal.
Etcd
----
Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
cluster members as backends. Note that the `access_ip` is used as the backend
IP, if specified. Frontend endpoints cannot be accessed externally as they are
bound to localhost only.
The `etcd_access_endpoint` fact provides an access pattern for clients, and the
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
It makes deployed components access the etcd cluster members
`etcd_multiaccess` (defaults to `false`) group var controls that behavior.
When enabled, it makes deployed components access the etcd cluster members
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
do the load balancing and handle HA for connections.
do the load balancing and handle HA for connections. Note, a pod definition of a
flannel networking plugin always uses a single `--etcd-server` endpoint!
Kube-apiserver
--------------
K8s components require a loadbalancer to access the apiservers via a reverse
proxy. Kubespray includes support for an nginx-based proxy that resides on each
proxy. Kargo includes support for an nginx-based proxy that resides on each
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient. This option is
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
You may also define the port the local internal loadbalancer uses by changing
configured by the variable `loadbalancer_apiserver_localhost`. You may also
define the port the local internal loadbalancer uses by changing
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
It is also important to note that Kubespray will only configure kubelet and kube-proxy
It is also important to note that Kargo will only configure kubelet and kube-proxy
on non-master nodes to use the local internal loadbalancer.
If you choose to NOT use the local internal loadbalancer, you will need to configure
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
a user and is not covered by ansible roles in Kubespray. By default, it only configures
a user and is not covered by ansible roles in Kargo. By default, it only configures
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
node in the `kube-master` group. It can also configure clients to use endpoints
for a given loadbalancer type. The following diagram shows how traffic to the
@@ -61,8 +68,8 @@ listen kubernetes-apiserver-https
mode tcp
timeout client 3h
timeout server 3h
server master1 <IP1>:6443
server master2 <IP2>:6443
server master1 <IP1>:443
server master2 <IP2>:443
balance roundrobin
```
@@ -88,9 +95,9 @@ Access endpoints are evaluated automagically, as the following:
| Endpoint type | kube-master | non-master |
|------------------------------|---------------|---------------------|
| Local LB (default) | http://lc:p | https://lc:nsp |
| Local LB | http://lc:p | https://lc:nsp |
| External LB, no internal | https://lb:lp | https://lb:lp |
| No ext/int LB | http://lc:p | https://m[0].aip:sp |
| No ext/int LB (default) | http://lc:p | https://m[0].aip:sp |
Where:
* `m[0]` - the first node in the `kube-master` group;

View File

@@ -1,121 +0,0 @@
# Kubespray (kargo) in own ansible playbooks repo
1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
Note:
* All forked public repos at github will also be public, so **never commit sensitive data to your public forks**.
* The list of all forked repos can be retrieved from the github page of the original project.
2. Add the **forked repo** as a submodule to the desired folder in your existing ansible repo (for example 3d/kubespray):
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
Git will create a _.gitmodules_ file in your existing ansible repo:
```
[submodule "3d/kubespray"]
path = 3d/kubespray
url = https://github.com/YOUR_GITHUB/kubespray.git
```
3. Configure git to show submodule status:
```git config --global status.submoduleSummary true```
4. Add *original* kubespray repo as upstream:
```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
5. Sync your master branch with upstream:
```
git checkout master
git fetch upstream
git merge upstream/master
git push origin master
```
6. Create a new branch which you will use in your working environment:
```git checkout -b work```
***Never*** use master branch of your repository for your commits.
7. Modify the path to library and roles in your ansible.cfg file (role naming should be unique; you may have to rename your existing roles if they have the same names as in the kubespray project):
```
...
library = 3d/kubespray/library/
roles_path = 3d/kubespray/roles/
...
```
8. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.
You could rename the *all.yml* config to something else, e.g. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all host groups related to the kubernetes setup.
9. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to the kubespray naming.
For example:
```
...
#Kargo groups:
[kube-node:children]
kubenode
[k8s-cluster:children]
kubernetes
[etcd:children]
kubemaster
kubemaster-ha
[kube-master:children]
kubemaster
kubemaster-ha
[vault:children]
kube-master
[kubespray:children]
kubernetes
```
* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.
10. Now you can include kargo tasks in your existing playbooks by including the cluster.yml file:
```
- name: Include kargo tasks
include: 3d/kubespray/cluster.yml
```
Or you could copy separate tasks from cluster.yml into your ansible repository.
11. Commit changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to the ansible repo as well.
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the actual code from the submodule.
# Contributing
If you made useful changes or fixed a bug in the existing kubespray repo, use this flow for PRs to the original kubespray repo.
0. Sign the [CNCF CLA](https://github.com/kubernetes/kubernetes/wiki/CLA-FAQ).
1. Change working directory to git submodule directory (3d/kubespray).
2. Set up the desired user.name and user.email for the submodule.
If kubespray is the only submodule in your repo you could use something like:
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
3. Sync with upstream master:
```
git fetch upstream
git merge upstream/master
git push origin master
```
4. Create new branch for the specific fixes that you want to contribute:
```git checkout -b fixes-name-date-index```
The branch name should be self-explanatory; adding a date and/or index will help you track/delete your old PRs.
5. Find the git hash of your commit in the "work" branch and apply it to the newly created "fix" branch:
```
git cherry-pick <COMMIT_HASH>
```
6. If you have several temporary-stage commits, squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
You could also use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.
7. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
Check that you're on the correct branch:
```git status```
And pull changes from upstream (if any):
```git pull --rebase upstream master```
8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will propose something like ```git push --set-upstream origin fixes-name-date-index```.
9. Open your forked repo in a browser; on the main page you will see a proposal to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
If everything is fine, add a description of your changes (what they do and why they're needed) and confirm pull request creation.

View File

@@ -1,103 +0,0 @@
# Overview
Distributed systems such as Kubernetes are designed to be resilient to
failures. More details about Kubernetes High-Availability (HA) may be found at
[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/)
To keep things simple, most HA details will be skipped here in order to describe
the Kubelet<->Controller Manager communication only.
By default the normal behavior looks like:
1. Kubelet updates its status to apiserver periodically, as specified by
`--node-status-update-frequency`. The default value is **10s**.
2. Kubernetes controller manager checks the statuses of Kubelets every
`--node-monitor-period`. The default value is **5s**.
3. In case the status is updated within `--node-monitor-grace-period` of time,
Kubernetes controller manager considers the Kubelet healthy. The
default value is **40s**.
> Kubernetes controller manager and Kubelets work asynchronously. It means that
> the delay may include any network latency, API Server latency, etcd latency,
> latency caused by load on one's master nodes and so on. So if
> `--node-status-update-frequency` is set to 5s in reality it may appear in
> etcd in 6-7 seconds or even longer when etcd cannot commit data to quorum
> nodes.
# Failure
Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
`nodeStatusUpdateRetry` is constantly set to 5 in
[kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).
Kubelet will try to update the status in
[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L345)
function. Kubelet uses `http.Client()` Golang method, but has no specified
timeout. Thus there may be some glitches when API Server is overloaded while
TCP connection is established.
So, there will be `nodeStatusUpdateRetry` * `--node-status-update-frequency`
attempts to set a status of node.
At the same time Kubernetes controller manager will try to check
`nodeStatusUpdateRetry` times every `--node-monitor-period` of time. After
`--node-monitor-grace-period` it will consider the node unhealthy. It will remove
its pods based on `--pod-eviction-timeout`.
Kube proxy has a watcher over API. Once pods are evicted, Kube proxy will
notice and will update the iptables of the node. It will remove endpoints from
services so pods from the failed node won't be accessible anymore.
# Recommendations for different cases
## Fast Update and Fast Reaction
If `--node-status-update-frequency` is set to **4s** (10s is default).
`--node-monitor-period` to **2s** (5s is default).
`--node-monitor-grace-period` to **20s** (40s is default).
`--pod-eviction-timeout` is set to **30s** (5m is default)
In such a scenario, pods will be evicted in **50s** because the node will be
considered as down after **20s**, and `--pod-eviction-timeout` occurs after
**30s** more. However, this scenario creates an overhead on etcd as every node
will try to update its status every 4 seconds.
If the environment has 1000 nodes, there will be 15000 node updates per
minute which may require large etcd containers or even dedicated nodes for etcd.
> If we calculate the number of tries, the division will give 5, but in reality
> it will be from 3 to 5 with `nodeStatusUpdateRetry` attempts of each try. The
> total number of attempts will vary from 15 to 25 due to latency of all
> components.
## Medium Update and Average Reaction
Let's set `--node-status-update-frequency` to **20s**
`--node-monitor-grace-period` to **2m** and `--pod-eviction-timeout` to **1m**.
In that case, Kubelet will try to update the status every 20s. So, there will be 6 * 5
= 30 attempts before the Kubernetes controller manager considers the node
unhealthy. After 1m it will evict all pods. The total time will be 3m
before the eviction process starts.
Such a scenario is good for medium environments as 1000 nodes will require 3000
etcd updates per minute.
> In reality, there will be from 4 to 6 node update tries. The total number
> of attempts will vary from 20 to 30.
## Low Update and Slow reaction
Let's set `--node-status-update-frequency` to **1m**.
`--node-monitor-grace-period` is set to **5m** and `--pod-eviction-timeout`
to **1m**. In this scenario, every kubelet will try to update the status every
minute. There will be 5 * 5 = 25 attempts before an unhealthy status. After 5m,
Kubernetes controller manager will set the unhealthy status. This means that pods
will be evicted 1m after the node is marked unhealthy (6m in total).
> In reality, there will be from 3 to 5 tries. The total number of attempts will
> vary from 15 to 25.
There can be different combinations such as Fast Update with Slow reaction to
satisfy specific cases.
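For reference, a sketch of the "Fast Update and Fast Reaction" profile expressed as component flags; the values are copied from the scenario above, and where you set them depends on your deployment tooling:

```
# kubelet side
kubelet --node-status-update-frequency=4s ...

# controller manager side
kube-controller-manager --node-monitor-period=2s \
  --node-monitor-grace-period=20s \
  --pod-eviction-timeout=30s ...
```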

View File

@@ -3,8 +3,7 @@ Large deployments of K8s
For large scale deployments, consider the following configuration changes:
* Tune [ansible settings]
(http://docs.ansible.com/ansible/intro_configuration.html)
* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
for `forks` and `timeout` vars to fit large numbers of nodes being deployed (see the sketch after this list).
* Override containers' `foo_image_repo` vars to point to intranet registry.
@@ -24,15 +23,9 @@ For a large scaled deployments, consider the following configuration changes:
* Tune CPU/memory limits and requests. Those are located in roles' defaults
and named like ``foo_memory_limit``, ``foo_memory_requests`` and
``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s
will be submitted as 'M', if applied for ``docker run``, and cpu K8s units
will end up with the 'm' skipped for docker as well. This is required as
docker does not understand k8s units well.
* Tune ``kubelet_status_update_frequency`` to increase reliability of kubelet.
``kube_controller_node_monitor_grace_period``,
``kube_controller_node_monitor_period``,
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
Check out [Kubernetes Reliability](kubernetes-reliability.md)
will be submitted as 'M', if applied for ``docker run``, and cpu K8s units will
end up with the 'm' skipped for docker as well. This is required as docker does not
understand k8s units well.
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
from host/network interruption much quicker with calico-rr. Note that
@@ -40,7 +33,7 @@ For a large scaled deployments, consider the following configuration changes:
etcd role is okay).
* Check out the
[Inventory](getting-started.md#building-your-own-inventory)
[Inventory](https://github.com/kubernetes-incubator/kargo/blob/master/docs/getting-started.md#building-your-own-inventory)
section of the Getting started guide for tips on creating a large scale
Ansible inventory.
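As a sketch of the ansible tuning mentioned in the first bullet: `forks` and `timeout` live in the `[defaults]` section of ansible.cfg, and the numbers below are arbitrary examples rather than recommendations:

```
# ansible.cfg - illustrative values only
[defaults]
forks = 50
timeout = 60
```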

View File

@@ -1,8 +1,8 @@
Network Checker Application
===========================
With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
Network Checker Application from the 3rd party `l23network/k8s-netchecker` docker
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
Network Checker Application from the 3rd party `l23network/mcp-netchecker` docker
images. It consists of a server and agents that try to reach the server through the
usual Kubernetes network connectivity means. Therefore, this
automagically verifies pod-to-pod connectivity via the cluster IP and checks
@@ -17,7 +17,7 @@ any of the cluster nodes:
```
curl http://localhost:31081/api/v1/connectivity_check
```
Note that Kubespray does not invoke the check but only deploys the application, if
Note that Kargo does not invoke the check but only deploys the application, if
requested.
There are related application-specific variables:
@@ -25,8 +25,8 @@ There are related application specifc variables:
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
agent_img: "quay.io/l23network/mcp-netchecker-agent:v0.1"
server_img: "quay.io/l23network/mcp-netchecker-server:v0.1"
```
Note that the application verifies DNS resolve for FQDNs comprising only the

View File

@@ -35,12 +35,14 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`)
and `kube_pods_subnet` (default `10.233.64.0/18`).
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
# allow kube_service_addresses and kube_pods_subnet network
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
# allow kube_service_addresses network
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
# allow kube_pods_subnet network
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
Now you can finally run the playbook.

View File

@@ -1,24 +1,24 @@
Kubespray's roadmap
Kargo's roadmap
=================
### Kubeadm
- Propose kubeadm as an option in order to set up the kubernetes cluster.
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553)
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
- to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)
### Provisioning and cloud providers
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
- [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234)
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
https://github.com/kubernetes/kubernetes/issues/18112)
### Tests
@@ -37,30 +37,30 @@ That would probably improve deployment speed and certs management [#553](https:/
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
### Lifecycle
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553)
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154)
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- [ ] Drain worker node when shutting down/deleting an instance
- [ ] Upgrade granularity: select components to upgrade and skip others
### Networking
- [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160)
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159)
- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
- [ ] Opencontrail
- [x] Canal
- [x] Cloud Provider native networking (instead of our network plugins)
### High availability
### High availability
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
### Kubespray-cli
### Kargo-cli
- Delete instances
- `kubespray vagrant` to setup a test cluster locally
- `kubespray azure` for Microsoft Azure support
- `kargo vagrant` to setup a test cluster locally
- `kargo azure` for Microsoft Azure support
- switch to Terraform instead of Ansible for provisioning
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
### Kubespray API
### Kargo API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
@@ -73,7 +73,7 @@ Include optionals deployments to init the cluster:
##### Others
##### Dashboards:
##### Dashboards:
- kubernetes-dashboard
- Fabric8
- Tectonic
@@ -87,8 +87,8 @@ Include optionals deployments to init the cluster:
### Others
- remove nodes (adding is already supported)
- being able to choose any k8s version (almost done)
- **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59)
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
- Review documentation (split in categories)
- **consul** -> if officially supported by k8s
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)

View File

@@ -4,40 +4,25 @@ Travis CI test matrix
GCE instances
-------------
Here is the test matrix for the CI gates:
Here is the test matrix for the Travis CI gates:
| Network plugin| OS type| GCE region| Nodes layout|
|-------------------------|-------------------------|-------------------------|-------------------------|
| canal| debian-8-kubespray| asia-east1-a| ha-scale|
| canal| debian-8-kubespray| asia-east1-a| ha|
| calico| debian-8-kubespray| europe-west1-c| default|
| flannel| centos-7| asia-northeast1-c| default|
| calico| centos-7| us-central1-b| ha|
| weave| rhel-7| us-east1-c| default|
| canal| coreos-stable| us-west1-b| ha-scale|
| canal| coreos-stable| us-west1-b| default|
| canal| rhel-7| asia-northeast1-b| separate|
| weave| ubuntu-1604-xenial| europe-west1-d| separate|
| calico| coreos-stable| us-central1-f| separate|
Node Layouts
------------
There are four node layout types: `default`, `separate`, `ha`, and `scale`.
`default` is a non-HA two nodes setup with one separate `kube-node`
and the `etcd` group merged with the `kube-master`.
`separate` layout is when there is only one node of each type, which includes
a kube-master, kube-node, and etcd cluster member.
`ha` layout consists of two etcd nodes, two masters and a single worker node,
with role intersection.
`scale` layout can be combined with above layouts. It includes 200 fake hosts
in the Ansible inventory. This helps test TLS certificate generation at scale
to prevent regressions and profile certain long-running tasks. These nodes are
never actually deployed, but certificates are generated for them.
Where the nodes layout `default` is a non-HA two nodes setup with the separate `kube-node`
and the `etcd` group merged with the `kube-master`. The `separate` layout is when
there is only one node of each type, which is a kube master, compute and etcd cluster member.
And the `ha` layout stands for a two etcd nodes, two masters and a single worker node,
partially intersecting though.
Note: the canal network plugin deploys flannel as well as the calico policy controller.
@@ -55,15 +40,15 @@ GCE instances
| Stage| Network plugin| OS type| GCE region| Nodes layout
|--------------------|--------------------|--------------------|--------------------|--------------------|
| part1| calico| coreos-stable| us-west1-b| separate|
| part1| calico| coreos-stable| us-west1-b| separated|
| part1| canal| debian-8-kubespray| us-east1-b| ha|
| part1| weave| rhel-7| europe-west1-b| default|
| part2| flannel| centos-7| us-west1-a| default|
| part2| calico| debian-8-kubespray| us-central1-b| default|
| part2| canal| coreos-stable| us-east1-b| default|
| special| canal| rhel-7| us-east1-b| separate|
| special| weave| ubuntu-1604-xenial| us-central1-b| default|
| special| calico| centos-7| europe-west1-b| ha-scale|
| special| weave| coreos-alpha| us-west1-a| ha-scale|
| special| canal| rhel-7| us-east1-b| separated|
| special| weave| ubuntu-1604-xenial| us-central1-b| separated|
| special| calico| centos-7| europe-west1-b| ha|
| special| weave| coreos-alpha| us-west1-a| ha|
The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.

View File

@@ -1,11 +1,11 @@
Upgrading Kubernetes in Kubespray
Upgrading Kubernetes in Kargo
=============================
#### Description
Kubespray handles upgrades the same way it handles initial deployment. That is to
Kargo handles upgrades the same way it handles initial deployment. That is to
say that each component is laid down in a fixed order. You should be able to
upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
also individually control versions of components by explicitly defining their
versions. Here are all version vars for each component:
@@ -18,7 +18,7 @@ versions. Here are all version vars for each component:
* flannel_version
* kubedns_version
#### Unsafe upgrade example
#### Example
If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
deploy the following way:
@@ -33,51 +33,15 @@ And then repeat with v1.4.6 as kube_version:
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
```
#### Graceful upgrade
Kubespray also supports cordon, drain and uncordoning of nodes when performing
a cluster upgrade. There is a separate playbook used for this purpose. It is
important to note that upgrade-cluster.yml can only be used for upgrading an
existing cluster. That means there must be at least 1 kube-master already
deployed.
```
git fetch origin
git checkout origin/master
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
```
After a successful upgrade, the Server Version should be updated:
```
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
```
#### Upgrade order
As mentioned above, components are upgraded in the order in which they were
installed in the Ansible playbook. The order of component installation is as
follows:
* Docker
* etcd
* kubelet and kube-proxy
* network_plugin (such as Calico or Weave)
* kube-apiserver, kube-scheduler, and kube-controller-manager
* Add-ons (such as KubeDNS)
#### Upgrade considerations
Kubespray supports rotating certificates used for etcd and Kubernetes
components, but some manual steps may be required. If you have a pod that
requires use of a service token and is deployed in a namespace other than
`kube-system`, you will need to manually delete the affected pods after
rotating certificates. This is because all service account tokens are dependent
on the apiserver token that is used to generate them. When the certificate
rotates, all service account tokens must be rotated as well. During the
kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted out of an abundance of caution
for impact to user deployed pods.
# Docker
# etcd
# kubelet and kube-proxy
# network_plugin (such as Calico or Weave)
# kube-apiserver, kube-scheduler, and kube-controller-manager
# Add-ons (such as KubeDNS)

View File

@@ -39,31 +39,3 @@ k8s-01 Ready 45s
k8s-02 Ready 45s
k8s-03 Ready 45s
```
Customize Vagrant
=================
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
or through an override file.
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a `config.rb` file in it.
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
e.g.:
```
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
```
and after `vagrant up` or `vagrant reload`, your host will have port forwarding set up with the guest on port 8001.
Use alternative OS for Vagrant
==============================
By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
operating system for your local cluster.
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
```
echo '$os = "coreos-stable"' >> vagrant/config.rb
```
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
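Combining the two overrides above, a `vagrant/config.rb` could simply read:

```
# vagrant/config.rb - same settings as the echo commands above
$os = "coreos-stable"
$forwarded_ports = {8001 => 8001}
```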

View File

@@ -1,4 +1,4 @@
Configurable Parameters in Kubespray
Configurable Parameters in Kargo
================================
#### Generic Ansible variables
@@ -12,7 +12,7 @@ Some variables of note include:
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
#### Common vars that are used in Kubespray
#### Common vars that are used in Kargo
* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
@@ -23,7 +23,7 @@ Some variables of note include:
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
resides
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
* *kube_network_plugin* - Changes k8s plugin to Calico
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes hyperkube version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
@@ -35,16 +35,15 @@ Some variables of note include:
* *access_ip* - IP for other hosts to use to connect to. Often required when
deploying from a cloud, such as OpenStack or GCE and you have separate
public/floating and private IPs.
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
and access_ip are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
address instead of localhost for kube-masters and kube-master[0] for
kube-nodes. See more details in the
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
the apiserver internally load balanced endpoint. Mutual exclusive to the
`loadbalancer_apiserver`. See more details in the
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - If enabled, all hosts will connect to
the apiserver internally load balanced endpoint. See more details in the
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
#### Cluster variables
@@ -67,13 +66,6 @@ following default cluster paramters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
alpha/experimental Kubernetes features. (default is `[]`)
* *authorization_modes* - A list of [authorization modes](
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
Note: `RBAC` is currently in an experimental phase and does not support either Calico or
Vault. Upgrading from non-RBAC to RBAC is not tested.
Note: if your cloud provider makes any use of ``10.233.0.0/16`` (e.g. for instances'
private addresses), make sure to pick other values for ``kube_service_addresses``
@@ -86,13 +78,13 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
variables to match your requirements; a sketch follows the variable list.
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
addition to Kubespray deployed DNS
addition to Kargo deployed DNS
* *nameservers* - Array of DNS servers configured for use in dnsmasq
* *searchdomains* - Array of up to 4 search domains
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
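A minimal sketch of these settings in `inventory/group_vars/all.yml`, with illustrative values only:

```yml
# Illustrative DNS settings; adjust servers and domains to your environment.
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
nameservers:
  - 8.8.8.8
searchdomains:
  - mycompany.example
skip_dnsmasq: false
```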
For more information, see [DNS
Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).
#### Other service variables
@@ -100,37 +92,9 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
``--insecure-registry=myregistry.mydomain:5000``
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
proxy
* *kubelet_deployment_type* - Controls the platform on which kubelet is deployed.
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
is unlikely to work on newer releases. Starting with the Kubernetes v1.7
series, this defaults to ``host``; before v1.7, the default was ``docker``.
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
* *kubelet_load_modules* - For some workloads, kubelet needs to load kernel modules;
for example, ceph- and rbd-backed persistent volumes require dynamic kernel services
that may not be loaded by the preinstall kubernetes processes. Set this variable to
true to let kubelet load kernel modules (see the sketch after this list).
* *kubelet_cgroup_driver* - Allows manual override of the
cgroup-driver option for Kubelet. By default autodetection is used
to match Docker configuration.
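As a minimal sketch, these kubelet-related variables could be set in group vars as follows; the cgroup driver value is only an assumption for illustration, and normally autodetection should be left alone:

```yml
# Illustrative kubelet settings
kubelet_deployment_type: host
kubelet_load_modules: true         # let kubelet load kernel modules, e.g. for rbd-backed volumes
#kubelet_cgroup_driver: cgroupfs   # only override if autodetection picks the wrong driver
```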
##### Custom flags for Kube Components
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
```
kubelet_custom_flags:
- "--eviction-hard=memory.available<100Mi"
- "--eviction-soft-grace-period=memory.available=30s"
- "--eviction-soft=memory.available<300Mi"
```
The possible vars are (an apiserver sketch follows this list):
* *apiserver_custom_flags*
* *controller_mgr_custom_flags*
* *scheduler_custom_flags*
* *kubelet_custom_flags*
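For example, a sketch of apiserver flags; the specific flag is illustrative, not a recommendation:

```yml
apiserver_custom_flags:
  - "--runtime-config=batch/v2alpha1"
```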
#### User accounts
By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself or change the `kube_api_pwd` var (a sketch follows).
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
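A sketch of such an override in group vars, with a placeholder value:

```yml
kube_api_pwd: "MyOwnStrongPassword"
```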

View File

@@ -1,95 +0,0 @@
Hashicorp Vault Role
====================
Overview
--------
The Vault role is a two-step process:
1. Bootstrap
You cannot securely start your certificate management service with SSL (and
the datastore behind it) without already having the certificates in hand. This
presents an unfortunate chicken-and-egg scenario, with one requiring the other.
To solve this, the Bootstrap step was added.
This step spins up a temporary instance of Vault to issue certificates for
Vault itself. It then leaves the temporary instance running, so that the Etcd
role can generate certs for itself as well. Eventually, this may be improved
to allow alternate backends (such as Consul), but currently the tasks are
hardcoded to only create a Vault role for Etcd.
2. Cluster
This step is where the long-term Vault cluster is started and configured. Its
first task is to stop any temporary instances of Vault, to free the port for
the long-term instance. At the end of this task, the entire Vault cluster should
be up and ready to go.
Keys to the Kingdom
-------------------
The two most important security pieces of Vault are the ``root_token``
and ``unsealing_keys``. Both of these values are given exactly once, during
the initialization of the Vault cluster. For convenience, they are saved
to the ``vault_secret_dir`` (default: /etc/vault/secrets) of every host in the
vault group.
It is *highly* recommended that these secrets are removed from the servers after
your cluster has been deployed, and kept in a safe location of your choosing.
Naturally, the seriousness of the situation depends on what you're doing with
your Kubespray cluster, but with these secrets, an attacker will have the ability
to authenticate to almost everything in Kubernetes and decode all private
(HTTPS) traffic on your network signed by Vault certificates.
For even greater security, you may want to remove and store elsewhere any
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
Vault by default encrypts all traffic to and from the datastore backend, all
resting data, and uses TLS for its TCP listener. It is recommended that you
do not change the Vault config to disable TLS, unless you absolutely have to.
Usage
-----
To get the Vault role running, you must do two things at a minimum:
1. Assign the ``vault`` group to at least 1 node in your inventory
2. Change ``cert_management`` to be ``vault`` instead of ``script``
Nothing else is required, but customization is possible. Check
``roles/vault/defaults/main.yml`` for the different variables that can be
overridden, most common being ``vault_config``, ``vault_port``, and
``vault_deployment_type``.
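A minimal sketch of the corresponding group vars, assuming the defaults named above (the commented port is an assumption based on Vault's stock default); at least one inventory node must also be placed in the ``vault`` group:

```yml
# Illustrative: switch certificate management to Vault
cert_management: vault
vault_deployment_type: docker
#vault_port: 8200   # assumed default; override if needed
```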
As a result of running the Vault role, separate root CAs will be created for `etcd`,
`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA
generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at:
* vault:
* ``/etc/vault/ssl/ca.pem``
* ``/etc/vault/ssl/ca-key.pem``
* etcd:
* ``/etc/ssl/etcd/ssl/ca.pem``
* ``/etc/ssl/etcd/ssl/ca-key.pem``
* kubernetes:
* ``/etc/kubernetes/ssl/ca.pem``
* ``/etc/kubernetes/ssl/ca-key.pem``
Additional Notes:
- ``groups.vault|first`` is considered the source of truth for Vault variables
- ``vault_leader_url`` is used as a pointer to the currently running Vault
- Each service should have its own role and credentials. Currently those
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
need to read in those credentials if it wants to interact with Vault.
Potential Work
--------------
- Change the Vault role to not run certain tasks when ``root_token`` and
``unseal_keys`` are not present. Alternatively, allow user input for these
values when missing.
- Add the ability to start temp Vault with Host, Rkt, or Docker
- Add a dynamic way to change out the backend role creation during Bootstrap,
so other services can be used (such as Consul)

View File

@@ -1,61 +0,0 @@
# vSphere cloud provider
Kubespray can be deployed with vSphere as Cloud provider. This feature supports
- Volumes
- Persistent Volumes
- Storage Classes and provisioning of volumes.
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
## Prerequisites
You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
After this step you should have:
- UUID activated for each VM where Kubernetes will be deployed
- A vSphere account with required privileges
## Kubespray configuration
First, define the cloud provider in `inventory/group_vars/all.yml` by setting it to `vsphere`.
```yml
cloud_provider: vsphere
```
Then, in the same file, declare your vCenter credentials following the description below.
| Variable | Required | Type | Choices | Default | Comment |
|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 |
| vsphere_insecure | TRUE | integer | 1, 0 | | set to 1 if the host above uses a self-signed cert |
| vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
| vsphere_password | TRUE | string | | | Password for vCenter |
| vsphere_datacenter | TRUE | string | | | Datacenter name to use |
| vsphere_datastore | TRUE | string | | | Datastore name to use |
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed |
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` |
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
Example configuration
```yml
vsphere_vcenter_ip: "myvcenter.domain.com"
vsphere_vcenter_port: 443
vsphere_insecure: 1
vsphere_user: "k8s@vsphere.local"
vsphere_password: "K8s_admin"
vsphere_datacenter: "DATACENTER_name"
vsphere_datastore: "DATASTORE_name"
vsphere_working_dir: "Docker_hosts"
vsphere_scsi_controller_type: "pvscsi"
```
## Deployment
Once the configuration is set, you can execute the playbook again to apply the new configuration
```
cd kubespray
ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
```
You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.

View File

@@ -1,98 +0,0 @@
Weave
=======
Weave 2.0.1 is supported by kubespray.
Weave supports [**consensus**](https://www.weave.works/docs/net/latest/ipam/#consensus) mode (the default) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode.
`Consensus` mode is best suited to clusters of static size and `seed` mode to clusters of dynamic size.
Weave encryption is supported for all communication.
* To use Weave encryption, specify a strong password (if there is no password, there is no encryption)
```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_password: EnterPasswordHere
```
This password is used to set an environment variable inside the weave container.
Weave is deployed by kubespray using a DaemonSet.
* Check the status of Weave containers
```
# From client
kubectl -n kube-system get pods | grep weave
# output
weave-net-50wd2 2/2 Running 0 2m
weave-net-js9rb 2/2 Running 0 2m
```
There must be as many pods as nodes (here the cluster has 2 nodes, so there are 2 weave pods).
* Check the status of weave (connection, encryption, ...) for each node
```
# On nodes
curl http://127.0.0.1:6784/status
# output on node1
Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)
Service: router
Protocol: weave 1..2
Name: fa:16:3e:b3:d6:b2(node1)
Encryption: enabled
PeerDiscovery: enabled
Targets: 2
Connections: 2 (1 established, 1 failed)
Peers: 2 (with 2 established connections)
TrustedSubnets: none
Service: ipam
Status: ready
Range: 10.233.64.0/18
DefaultSubnet: 10.233.64.0/18
```
* Check parameters of weave for each node
```
# On nodes
ps -aux | grep weaver
# output on node1 (here it uses seed mode)
root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
```
### Consensus mode (default mode)
This mode is best suited to clusters of static size.
### Seed mode
This mode is best suited to clusters of dynamic size.
Seed mode also allows multi-cloud and hybrid on-premise/cloud cluster deployments.
* Switch from consensus mode to seed mode
```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_mode_seed: true
```
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_seed: uninitialized
weave_peers: uninitialized
```
The first variable, `weave_seed`, contains the initial nodes of the weave network.
The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network.
These two variables are used to connect a new node to the weave network: the new node needs to know the first nodes (the seed) and the list of IPs of all nodes.
To reset these variables and the weave network, set them back to `uninitialized`.
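For illustration only, populated values might look like the following, reusing the seed and node IPs visible in the `ps` output above; the exact serialized format is an assumption, and these values are managed by the role, not by hand:

```yml
# In file ./inventory/group_vars/k8s-cluster.yml -- managed by the role, do not edit
weave_seed: fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53
weave_peers: 192.168.208.28,192.168.208.19
```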

View File

@@ -1 +0,0 @@
../inventory

View File

@@ -1 +0,0 @@
../roles

View File

@@ -1,60 +0,0 @@
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
### * Will not upgrade network plugins
### * Will not upgrade Docker
### * Currently does not support Vault deployment.
###
### In most cases, you probably want to use upgrade-cluster.yml playbook and
### not this one.
- hosts: localhost
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - { role: kubespray-defaults}
    - { role: bootstrap-os, tags: bootstrap-os}

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  vars:
    ansible_ssh_pipelining: true
  gather_facts: true

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, tags: preinstall }

#Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: 1
  roles:
    - { role: kubespray-defaults}
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/master, tags: master }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults}
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: upgrade/post-upgrade, tags: post-upgrade }
    - { role: kubespray-defaults}

View File

@@ -1,68 +1,167 @@
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
#access_ip: 1.1.1.1
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
### LOADBALANCING AND ACCESS MODES
## Enable multiaccess to configure etcd clients to access all of the etcd members directly
## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
## This may be the case if clients support and loadbalance multiple etcd servers natively.
#etcd_multiaccess: true
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.5.1
### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
# kubelet_load_modules: false
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
## Internal network total size. This is the prefix of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18
# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"
## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
#peer_with_router: false
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
# 1.5 default behavior. The flag is only added if the kubernetes version used is >= 1.5
kube_api_anonymous_auth: false
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "{{kube_api_pwd}}"
    role: admin
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is used to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node, for example. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1
# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: true
# Assume that no internal loadbalancers for apiservers exist and listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# local loadbalancer should use this port instead - default to kube_apiserver_port
nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
dns_domain: "{{ cluster_name }}"
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
# There are some changes specific to the cloud providers
# for instance we need to encapsulate packets with some network plugins
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
# When openstack is used make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# When azure is used, you need to also set the following variables.
# cloud_provider:
# see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
@@ -74,53 +173,34 @@ bin_dir: /usr/local/bin
#azure_vnet_name:
#azure_route_table_name:
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"
## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
#
## Set these proxy values in order to update docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
#no_proxy: ""
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
docker_bin_dir: "/usr/bin"
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2
# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
#docker_dns_servers_strict: false
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
## Default packages to install within the cluster, f.e:
#kpm_packages:
# default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana
## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault"
#cert_management: script
## Please specify true if you want to perform a kernel upgrade
kernel_upgrade: false
# Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false
## Etcd auto compaction retention for mvcc key value store in hour
#etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
#etcd_metrics: basic
# Settings for containerized control plane (etcd/kubelet)
rkt_version: 1.21.0
etcd_deployment_type: docker
kubelet_deployment_type: docker

View File

@@ -1,170 +0,0 @@
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.7.5
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
    groups:
      - system:masters
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
#kube_basic_auth: true
#kube_token_auth: true
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_groups_claim: groups
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# weave's network password for encryption
# if null then no network encryption
# you can use --extra-vars to pass the password in command line
weave_password: EnterPasswordHere
# Weave uses consensus mode by default
# Enabling seed mode allow to dynamically add or remove hosts
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are automatically changed by the weave role; do not manually change these values
# To reset values :
# weave_seed: uninitialized
# weave_peers: uninitialized
weave_seed: uninitialized
weave_peers: uninitialized
# Enable kubernetes network policies
enable_network_policy: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
docker_bin_dir: "/usr/bin"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
cert_management: script
vault_deployment_type: docker
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard (available at http://first_master:6443/ui by default)
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false
# Helm deployment
helm_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
# kubectl_localhost: false
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

Some files were not shown because too many files have changed in this diff.