---
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Use ProxyCommand if a bastion host is in group 'all'
# This obsoletes editing the ansible.cfg file depending on bastion existence
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} ' {% endif %}"
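# For reference, a minimal bastion entry in the inventory might look like this
# (hostname, address and user below are hypothetical):
#   [bastion]
#   bastion ansible_host=203.0.113.10 ansible_user=ubuntu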
kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.2
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
etcd_data_dir: /var/lib/etcd
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
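# For example, with ndots: 2 the generated /etc/resolv.conf would carry a line
# like "options ndots:2", so names with fewer than two dots are tried against
# the search domains first.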
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Should be set to a cluster IP if using a custom cluster DNS
# manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy the netchecker app to verify DNS resolution via an HTTP service
deploy_netchecker: false
# IP address of the Kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
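# With the default kube_service_addresses (10.233.0.0/18) the two values above
# evaluate to 10.233.0.3 and 10.233.0.2 respectively.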
dns_domain: "{{ cluster_name }}"
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{ kube_api_pwd }}"
    role: admin
# Choose network plugin (cilium, calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
# Determines if calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
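# For example, route reflectors could be declared in the inventory like this
# (hostnames hypothetical):
#   [calico-rr]
#   rr0
#   rr1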
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
# Example: Up to 256 nodes, 100 pods per node (/16 network):
# - kube_service_addresses: 10.233.0.0/17
# - kube_pods_subnet: 10.233.128.0/17
# - kube_network_node_prefix: 25
# Example: Up to 4096 nodes, 100 pods per node (/12 network):
# - kube_service_addresses: 10.192.0.0/13
# - kube_pods_subnet: 10.200.0.0/13
# - kube_network_node_prefix: 25
kube_network_node_prefix: 24
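# Worked out with the defaults above: the /18 kube_pods_subnet splits into
# 2^(24-18) = 64 node-sized /24 blocks, each with 2^8 - 2 = 254 usable pod IPs.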
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
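# With the default kube_service_addresses (10.233.0.0/18) this evaluates to 10.233.0.1.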
kube_apiserver_bind_address: 0.0.0.0
# https
kube_apiserver_port: 6443
# http
kube_apiserver_insecure_bind_address: 127.0.0.1
kube_apiserver_insecure_port: 8080
# Aggregator
kube_api_aggregator_routing: true
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries, like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
helm_deployment_type: host
# Enable kubeadm deployment (experimental)
kubeadm_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
kubectl_localhost: false
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
istio_enabled: false
registry_enabled: false
enable_network_policy: false
local_volume_provisioner_enabled: false
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
# Base path for local volume provisioner addon
local_volume_base_dir: /mnt/disks
local_volume_mount_dir: /local-disks
local_volume_storage_class: local-storage
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
openstack_lbaas_enabled: false
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
openstack_lbaas_create_monitor: "yes"
openstack_lbaas_monitor_delay: "1m"
openstack_lbaas_monitor_timeout: "30s"
openstack_lbaas_monitor_max_retries: "3"
## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
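# For example, to deploy with no authorization enforcement at all you could
# override the list from the command line:
#   ansible-playbook cluster.yml -e '{"authorization_modes": []}'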
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volume_provisioner_enabled|string }}']
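# With the defaults above (istio and the local volume provisioner disabled)
# this renders as ['Initializers=False', 'PersistentLocalVolumes=False'].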
# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
## Running on top of OpenStack VMs with Cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to avoid this issue
volume_cross_zone_attachment: false
# Weave's network password for encryption;
# if null, network traffic is not encrypted.
# You can use --extra-vars to pass the password on the command line
weave_password: EnterPasswordHere
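# For example (password value hypothetical):
#   ansible-playbook cluster.yml --extra-vars "weave_password=MySecretPassword"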
# Weave uses consensus mode by default
# Enabling seed mode allows hosts to be added or removed dynamically
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are changed automatically by the Weave role in group_vars/k8s-cluster.yml.
# Do not manually change these values
weave_seed: uninitialized
weave_peers: uninitialized
## Set no_proxy to all assigned cluster IPs and hostnames
no_proxy: >-
  {%- if loadbalancer_apiserver is defined -%}
  {{ apiserver_loadbalancer_domain_name| default('') }},
  {{ loadbalancer_apiserver.address | default('') }},
  {%- endif -%}
  {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
  {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }},
  {%- if (item != hostvars[item]['ansible_hostname']) -%}
  {{ hostvars[item]['ansible_hostname'] }},
  {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
  {%- endif -%}
  {{ item }},{{ item }}.{{ dns_domain }},
  {%- endfor -%}
  127.0.0.1,localhost
proxy_env:
  http_proxy: "{{ http_proxy | default('') }}"
  https_proxy: "{{ https_proxy | default('') }}"
  no_proxy: "{{ no_proxy }}"
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
# applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
kube_apiserver_endpoint: |-
  {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
  {%- elif is_kube_master -%}
  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
  {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
  https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- else -%}
  https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif -%}
  {%- endif %}
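# With the defaults above this renders on a master as https://127.0.0.1:6443
# (the 0.0.0.0 bind address is rewritten to 127.0.0.1).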
kube_apiserver_insecure_endpoint: >-
  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
kube_apiserver_client_cert: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.crt
  {%- else -%}
  {{ kube_cert_dir }}/apiserver.pem
  {%- endif %}
kube_apiserver_client_key: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.key
  {%- else -%}
  {{ kube_cert_dir }}/apiserver-key.pem
  {%- endif %}
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_member_name: |-
  {% for host in groups['etcd'] %}
  {% if inventory_hostname == host %}{{ "etcd" + loop.index|string }}{% endif %}
  {% endfor %}
etcd_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ "etcd" + loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
  {%- endfor %}
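# For a hypothetical three-node etcd group this renders as:
#   etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380,etcd3=https://10.0.0.3:2380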