---
# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
  kube:
    pass: changeme
    role: admin
#  root:
#    pass: changeme
#    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local

# set this variable to calico if needed. keep it empty if flannel is used
kube_network_plugin: calico

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each nodes will be distributed by the datacenter router

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443  # (https)
kube_apiserver_insecure_port: 8080  # (http)

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Upstream dns servers used by dnsmasq
upstream_dns_servers:
  - 8.8.8.8
  # NOTE(review): 4.4.8.8 looks like a transposition of Google's 8.8.4.4 — confirm before relying on it
  - 4.4.8.8
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes dns service
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy :
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"