---
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Use ProxyCommand if a bastion host is in group 'all'
# This removes the need to edit ansible.cfg depending on whether a bastion host exists
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} ' {% endif %}"
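# The condition above checks inventory host names, so it requires a host literally
# named 'bastion' with ansible_host set, e.g. (address illustrative, not a default):
#   bastion ansible_host=192.0.2.10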
kube_api_anonymous_auth: false
# Default value; it will be set to true automatically if an Atomic host is detected
is_atomic: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.3
## Kube-proxy mode: one of ['iptables', 'ipvs']
kube_proxy_mode: iptables
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
etcd_data_dir: /var/lib/etcd
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
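# Illustrative effect on a host's /etc/resolv.conf (the search entries are example
# subdomains of dns_domain generated by the DNS roles, not literal defaults):
#   search default.svc.cluster.local svc.cluster.local
#   options ndots:2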
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Should be set to a cluster IP if using a custom cluster DNS
# manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy the netchecker app to verify DNS resolution (exposed as an HTTP service)
deploy_netchecker: false
# IP address of the Kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
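# With the default kube_service_addresses (10.233.0.0/18) the expressions above
# evaluate to 10.233.0.3 (skydns_server) and 10.233.0.2 (dnsmasq_dns_server).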
dns_domain: "{{ cluster_name }}"
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where the basic auth file is saved
kube_users_dir: "{{ kube_config_dir }}/users"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
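# Additional basic-auth users can be declared under kube_users in the same shape;
# the entry below is purely illustrative, not a default:
#   alice:
#     pass: "some-other-password"
#     role: admin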
# Choose network plugin (cilium, calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
# Determines whether a non-empty calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
# Example: Up to 256 nodes, 100 pods per node (/16 network):
# - kube_service_addresses: 10.233.0.0/17
# - kube_pods_subnet: 10.233.128.0/17
# - kube_network_node_prefix: 25
# Example: Up to 4096 nodes, 100 pods per node (/12 network):
# - kube_service_addresses: 10.192.0.0/13
# - kube_pods_subnet: 10.200.0.0/13
# - kube_network_node_prefix: 25
kube_network_node_prefix: 24
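# With the defaults above: splitting the /18 pods subnet into /24 per-node blocks
# gives 2^(24-18) = 64 nodes, each with 254 usable pod addresses.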
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
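# With the default kube_service_addresses (10.233.0.0/18) this evaluates to 10.233.0.1.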
kube_apiserver_bind_address: 0.0.0.0
# https
kube_apiserver_port: 6443
# http
kube_apiserver_insecure_bind_address: 127.0.0.1
kube_apiserver_insecure_port: 8080
# Aggregator
kube_api_aggregator_routing: true
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
helm_deployment_type: host
# Enable kubeadm deployment (experimental)
kubeadm_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
kubectl_localhost: false
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
istio_enabled: false
registry_enabled: false
enable_network_policy: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
openstack_lbaas_enabled: false
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
openstack_lbaas_create_monitor: "yes"
openstack_lbaas_monitor_delay: "1m"
openstack_lbaas_monitor_timeout: "30s"
openstack_lbaas_monitor_max_retries: "3"
## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
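# These are rendered, in order, into the API server's --authorization-mode flag,
# e.g. --authorization-mode=Node,RBAC with the defaults above.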
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates:
  - "Initializers={{ istio_enabled | string }}"
  - "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
  - "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
  - "MountPropagation={{ local_volume_provisioner_enabled | string }}"
# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
## Running on top of OpenStack VMs with Cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
# Weave's network password for encryption
# If null then no network encryption
# You can use --extra-vars to pass the password on the command line
weave_password: EnterPasswordHere
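# For example, at deploy time (inventory path and password are illustrative):
#   ansible-playbook -i inventory/inventory.cfg cluster.yml -e "weave_password=ChangeMe"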
# Weave uses consensus mode by default
# Enabling seed mode allows dynamically adding or removing hosts
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are automatically changed by the weave role in group_vars/k8s-cluster.yml.
# Do not manually change these values
weave_seed: uninitialized
weave_peers: uninitialized
## Set no_proxy to all assigned cluster IPs and hostnames
no_proxy: >-
  {%- if loadbalancer_apiserver is defined -%}
  {{ apiserver_loadbalancer_domain_name| default('') }},
  {{ loadbalancer_apiserver.address | default('') }},
  {%- endif -%}
  {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
  {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }},
  {%- if (item != hostvars[item]['ansible_hostname']) -%}
  {{ hostvars[item]['ansible_hostname'] }},
  {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
  {%- endif -%}
  {{ item }},{{ item }}.{{ dns_domain }},
  {%- endfor -%}
  127.0.0.1,localhost
proxy_env:
  http_proxy: "{{ http_proxy| default ('') }}"
  https_proxy: "{{ https_proxy| default ('') }}"
  no_proxy: "{{ no_proxy }}"
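# Example values when the deployment must go through a corporate proxy; define
# them e.g. as inventory group vars or extra vars (URL illustrative, not a default):
#   http_proxy: "http://proxy.example.com:3128"
#   https_proxy: "http://proxy.example.com:3128"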
# Vars for pointing to Kubernetes API endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
# Applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
kube_apiserver_endpoint: |-
  {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
  {%- elif is_kube_master -%}
  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
  {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
  https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- else -%}
  https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif -%}
  {%- endif %}
kube_apiserver_insecure_endpoint: >-
  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
kube_apiserver_client_cert: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.crt
  {%- else -%}
  {{ kube_cert_dir }}/apiserver.pem
  {%- endif %}
kube_apiserver_client_key: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.key
  {%- else -%}
  {{ kube_cert_dir }}/apiserver-key.pem
  {%- endif %}
# Set to true to deploy etcd-events cluster
etcd_events_cluster_setup: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_access_address }}:2381"
etcd_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_member_name: |-
  {% for host in groups['etcd'] %}
  {% if inventory_hostname == host %}{{ "etcd" + loop.index|string }}{% endif %}
  {% endfor %}
etcd_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ "etcd" + loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
  {%- endfor %}
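# With a three-host etcd group these render member names etcd1..etcd3 and a peer
# list such as (addresses purely illustrative):
#   etcd1=https://10.10.0.1:2380,etcd2=https://10.10.0.2:2380,etcd3=https://10.10.0.3:2380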
etcd_events_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ "etcd" + loop.index|string }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
  {%- endfor %}