# c12s-kubespray/roles/kubespray-defaults/defaults/main.yaml
---
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Use ProxyCommand if a bastion host is in group 'all'
# This removes the need to edit ansible.cfg depending on whether a bastion exists
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
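# Example (hypothetical host values): declaring a bastion in an INI inventory
# so the ProxyCommand above takes effect:
#   [bastion]
#   bastion ansible_host=203.0.113.10 ansible_user=ubuntu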
kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.5
## Kube Proxy mode. One of ['iptables', 'ipvs']
kube_proxy_mode: iptables
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
etcd_data_dir: /var/lib/etcd
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Should be set to a cluster IP if using a custom cluster DNS
# manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy the netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the Kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
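# With the default kube_service_addresses of 10.233.0.0/18 (set below), these
# evaluate to 10.233.0.3, 10.233.0.4 and 10.233.0.2 respectively.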
dns_domain: "{{ cluster_name }}"
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
# Choose network plugin (cilium, calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
# Determines if calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# Kubernetes internal network for pods. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
# Example: Up to 256 nodes, 100 pods per node (/16 network):
# - kube_service_addresses: 10.233.0.0/17
# - kube_pods_subnet: 10.233.128.0/17
# - kube_network_node_prefix: 25
# Example: Up to 4096 nodes, 100 pods per node (/12 network):
# - kube_service_addresses: 10.192.0.0/13
# - kube_pods_subnet: 10.200.0.0/13
# - kube_network_node_prefix: 25
kube_network_node_prefix: 24
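# Sizing arithmetic for the defaults above: splitting the /18 pods subnet into
# /24 per-node blocks yields 2^(24-18) = 64 nodes, and each /24 leaves
# 254 usable pod addresses per node.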
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
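# With the default service range this evaluates to 10.233.0.1.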
kube_apiserver_bind_address: 0.0.0.0
# https
kube_apiserver_port: 6443
# http
kube_apiserver_insecure_bind_address: 127.0.0.1
kube_apiserver_insecure_port: 8080
# Aggregator
kube_api_aggregator_routing: false
# Docker options
# Optionally do not run docker role
manage_docker: true
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
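# Example (hypothetical registry address): append an extra insecure registry
# by extending the same string:
# docker_options: "--insecure-registry=registry.example.local:5000 --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"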
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
helm_deployment_type: host
# Enable kubeadm deployment (experimental)
kubeadm_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
kubectl_localhost: false
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
istio_enabled: false
registry_enabled: false
enable_network_policy: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
cert_manager_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
openstack_lbaas_enabled: false
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
openstack_lbaas_create_monitor: "yes"
openstack_lbaas_monitor_delay: "1m"
openstack_lbaas_monitor_timeout: "30s"
openstack_lbaas_monitor_max_retries: "3"
## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
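# Example (not recommended outside of testing): disable authorization entirely
# with authorization_modes: ['AlwaysAllow']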
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet's HTTPS endpoint
kubelet_authentication_token_webhook: false
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates:
  - "Initializers={{ istio_enabled | string }}"
  - "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
  - "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
  - "MountPropagation={{ local_volume_provisioner_enabled | string }}"
# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
# Local volume provisioner dirs
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to avoid this issue
volume_cross_zone_attachment: false
# Weave's network password for encryption;
# if null, no network encryption is used.
# You can use --extra-vars to pass the password on the command line
weave_password: EnterPasswordHere
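# Example (hypothetical secret value):
#   ansible-playbook cluster.yml --extra-vars "weave_password=MySecretPassword"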
# Weave uses consensus mode by default
# Enabling seed mode allows dynamically adding or removing hosts
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are automatically changed by the weave role in group_vars/k8s-cluster.yml.
# Do not manually change these values
weave_seed: uninitialized
weave_peers: uninitialized
## Set no_proxy to all assigned cluster IPs and hostnames
no_proxy: >-
  {%- if http_proxy is defined or https_proxy is defined %}
  {%- if loadbalancer_apiserver is defined -%}
  {{ apiserver_loadbalancer_domain_name| default('') }},
  {{ loadbalancer_apiserver.address | default('') }},
  {%- endif -%}
  {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
  {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }},
  {%- if (item != hostvars[item]['ansible_hostname']) -%}
  {{ hostvars[item]['ansible_hostname'] }},
  {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
  {%- endif -%}
  {{ item }},{{ item }}.{{ dns_domain }},
  {%- endfor -%}
  127.0.0.1,localhost
  {%- endif %}
proxy_env:
http_proxy: "{{ http_proxy| default ('') }}"
https_proxy: "{{ https_proxy| default ('') }}"
no_proxy: "{{ no_proxy| default ('') }}"
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
# applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
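# Example (hypothetical address/port) of an external load balancer definition,
# matching the loadbalancer_apiserver.address/.port lookups used below:
#   loadbalancer_apiserver:
#     address: 203.0.113.20
#     port: 8443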
kube_apiserver_endpoint: |-
  {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
  {%- elif is_kube_master -%}
  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
  {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
  https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- else -%}
  https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif -%}
  {%- endif %}
kube_apiserver_insecure_endpoint: >-
  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
kube_apiserver_client_cert: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.crt
  {%- else -%}
  {{ kube_cert_dir }}/apiserver.pem
  {%- endif %}
kube_apiserver_client_key: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.key
  {%- else -%}
  {{ kube_cert_dir }}/apiserver-key.pem
  {%- endif %}
# Set to true to deploy etcd-events cluster
etcd_events_cluster_setup: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_access_address }}:2381"
etcd_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_member_name: |-
  {% for host in groups['etcd'] %}
  {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
  {% endfor %}
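# e.g. the first host in groups['etcd'] becomes etcd1, the second etcd2, and so on.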
etcd_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
  {%- endfor %}
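# Renders to e.g. (hypothetical IPs): etcd1=https://192.0.2.1:2380,etcd2=https://192.0.2.2:2380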
etcd_events_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ "etcd"+loop.index|string }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
  {%- endfor %}