# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where to save the basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.5.3

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# Uncomment this line for CoreOS only.
# Directory where the python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster loglevel configuration
kube_log_level: 2

# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
# 1.5 default behavior. The flag is only added if the Kubernetes version in use is >= 1.5.
kube_api_anonymous_auth: false

#
# For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
# for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
# processes; ceph and rbd backed volumes are examples. Set to true to allow kubelet to load kernel
# modules.
#
kubelet_load_modules: false

# Users to create for basic auth in the Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "{{kube_api_pwd}}"
    role: admin

# Kubernetes cluster name; it will also be used as the DNS domain
cluster_name: cluster.local
# Subdomains of the DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Deploy the netchecker app to verify DNS resolution; it runs as an HTTP service
deploy_netchecker: false

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory-level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. the listen
# address for etcd. This is used to help with environments like Vagrant or
# multi-NIC systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel, for example, to allow other flannel
# nodes to see this node. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1
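#
# As a purely illustrative sketch (the group, hostname and addresses below
# are made up, not defaults), these two variables are typically set per host
# in the Ansible inventory rather than here:
#
# [kube-master]
# node1 ip=10.2.2.2 access_ip=1.1.1.1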
# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: true

# Assume there are no internal loadbalancers for the apiservers and listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true

# Choose the network plugin (calico, canal, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico

# Kubernetes internal network for services; an unused block of address space.
kube_service_addresses: 10.233.0.0/18

# Internal network for pods. When used, IP addresses from this range will be
# assigned to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# Internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# Internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults (a /18 pod network split
# into /24 blocks) you should have room for 64 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With Calico it is possible to distribute routes to the datacenter's border routers.
peer_with_router: false
# Warning: enabling router peering will disable Calico's default behavior ('node mesh').
# The subnets of each node will then be distributed by the datacenter router.

# API Server service IP address in the Kubernetes internal network.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
# The port the API Server will be listening on.
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# The local loadbalancer should use this port instead; defaults to kube_apiserver_port
nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such a DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.

# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns

## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4

dns_domain: "{{ cluster_name }}"

# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
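#
# For reference, with the default kube_service_addresses of 10.233.0.0/18,
# the templated values above expand to:
#   kube_apiserver_ip -> 10.233.0.1
#   dns_server        -> 10.233.0.2
#   skydns_server     -> 10.233.0.3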
# There are some changes specific to the cloud providers;
# for instance, we need to encapsulate packets with some network plugins.
# If set, the possible values are 'gce', 'aws', 'azure' or 'openstack'.
# When openstack is used, make sure to source the openstack credentials,
# like you would do when using nova-client, before starting the playbook.
# When azure is used, you need to also set the following variables.
# cloud_provider:

# See docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:

## Set these proxy values in order to update the docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
docker_bin_dir: "/usr/bin"

## Uncomment this if you want to force overlay/overlay2 as the docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# Default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana

# Settings for the containerized control plane (etcd/kubelet)
rkt_version: 1.21.0
etcd_deployment_type: docker
kubelet_deployment_type: docker
vault_deployment_type: docker

efk_enabled: false

## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault".
cert_management: script

# Set to true if you want to perform a kernel upgrade
kernel_upgrade: false
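#
# As a hedged example of the alternative documented under Certificate
# Management above, Vault-based cert issuance would be selected like so
# (per the comment above, this starts a Vault cluster backed by etcd):
# cert_management: vault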