# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none

# Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google Cloud
## environments where the nodes are accessed remotely by the "public" IP
## but don't know about that address themselves.
#access_ip: 1.1.1.1

### LOADBALANCING AND ACCESS MODES
## Enable multiaccess to configure etcd clients to access all of the etcd members directly
## as "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
## This may be the case if clients support and loadbalance multiple etcd servers natively.
#etcd_multiaccess: true
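## For example (hypothetical hosts, using etcd's default client port), clients would
## then be configured with the endpoint list
## "http://etcd1:2379, http://etcd2:2379, http://etcd3:2379" instead of a single LB address.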

## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
#  address: 1.2.3.4
#  port: 1234
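## With the example above uncommented (hypothetical values), cluster components
## would reach the apiserver through https://elb.some.domain:1234 rather than a
## master's local address.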

## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true

## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443

### OTHER OPTIONAL VARIABLES
## For some setups, kubelet needs to load kernel modules, e.g. dynamic kernel services
## needed for mounting persistent volumes (such as ceph- and rbd-backed volumes) into
## containers. These modules may not be loaded by the preinstall kubernetes processes.
## Set to true to allow kubelet to load kernel modules.
# kubelet_load_modules: false

## Internal network total size. This is the prefix length of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18
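## For example, a prefix length of 18 gives a /18 network with 2^(32-18) = 16384 addresses.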

## With Calico it is possible to distribute routes to the border routers of the datacenter.
## Warning: enabling router peering will disable Calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router.
#peer_with_router: false

## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4

## There are some changes specific to the cloud providers;
## for instance, we need to encapsulate packets with some network plugins.
## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', or 'vsphere'.
## When 'openstack' is used, make sure to source the openstack credentials,
## as you would when using nova-client, before starting the playbook.
#cloud_provider:

## When 'azure' is used, you also need to set the following variables.
## See docs/azure.md for details on how to get these values.
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:
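## Example shape only (all values hypothetical; see docs/azure.md for real ones):
#azure_tenant_id: "00000000-0000-0000-0000-000000000000"
#azure_location: "westeurope"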

## When OpenStack is used, the Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
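## The assembled token follows kubeadm's "<6 chars>.<16 chars>" format,
## e.g. "abcdef.0123456789abcdef" (hypothetical value).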
#
## Set these proxy values in order to update the package manager and the docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
#no_proxy: ""
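## Example (hypothetical proxy host): route outbound traffic through a corporate
## proxy while keeping local addresses direct:
#http_proxy: "http://proxy.example.com:3128"
#https_proxy: "http://proxy.example.com:3128"
#no_proxy: "localhost,127.0.0.1"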

## Uncomment this if you want to force overlay/overlay2 as the docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2

# Uncomment this if you have more than 3 nameservers; then only the first 3 will be used.
#docker_dns_servers_strict: false

## Default packages to install within the cluster, e.g.:
#kpm_packages:
#  - name: kube-system/grafana

## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault".
#cert_management: script

## Please specify true if you want to perform a kernel upgrade
kernel_upgrade: false

# Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false

## Etcd auto compaction retention for mvcc key value store, in hours. 0 disables auto compaction.
#etcd_compaction_retention: 0

## Set the level of detail for etcd exported metrics; specify 'extensive' to include histogram metrics.
#etcd_metrics: basic