# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none

# Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" IP,
## but don't know about that address themselves.
#access_ip: 1.1.1.1

## The kube_apiserver_ext_ip is only used when generating the certificate protecting the apiservers.
## This is handy in case you want/have to access the apiserver through an external IP
## that is not routable inside the cluster.
## kube_apiserver_ext_ip: 10.10.10.10

### LOADBALANCING AND ACCESS MODES
## Enable multiaccess to configure etcd clients to access all of the etcd members directly
## as "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
## This may be the case if clients support and load-balance multiple etcd servers natively.
#etcd_multiaccess: true

## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
#  address: 1.2.3.4
#  port: 1234

## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true

## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443

### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services
## are needed for mounting persistent volumes into containers, such as ceph and rbd backed
## volumes. These may not be loaded by the preinstall kubernetes processes. Set to true to
## allow kubelet to load kernel modules.
# kubelet_load_modules: false

## Internal network total size. This is the prefix of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18

## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router.
#peer_with_router: false

## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4

## There are some changes specific to the cloud providers;
## for instance, we need to encapsulate packets with some network plugins.
## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', or 'vsphere'.
## When openstack is used, make sure to source the openstack credentials,
## like you would do when using nova-client, before starting the playbook.
#cloud_provider:
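## Example (illustrative only, not a default): to use the OpenStack cloud provider,
## uncomment the line below and source your OpenStack credentials (e.g. your RC file)
## in the shell that runs the playbook.
# cloud_provider: openstack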
## When azure is used, you need to also set the following variables.
## See docs/azure.md for details on how to get these values.
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:

## Set these proxy values in order to update the docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
#no_proxy: ""

## Uncomment this if you want to force overlay/overlay2 as the docker storage driver.
## Please note that overlay2 is only supported on newer kernels.
#docker_storage_options: -s overlay2

## Default packages to install within the cluster, e.g.:
#kpm_packages:
# - name: kube-system/grafana

## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault".
#cert_management: script

## Please specify true if you want to perform a kernel upgrade
kernel_upgrade: false
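## Example values for the proxy settings defined above (illustrative only;
## substitute your own proxy host, port, and no_proxy entries).
# http_proxy: "http://proxy.example.com:3128"
# https_proxy: "http://proxy.example.com:3128"
# no_proxy: "localhost,127.0.0.1"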