---
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

## Experimental kubeadm etcd deployment mode. Available only for new deployments
etcd_kubeadm_enabled: false

## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google Cloud
## environments where the nodes are accessed remotely by the "public" IP,
## but don't know about that address themselves.
# access_ip: 1.1.1.1

## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"

## If Cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true

## The local loadbalancer should use this port,
## and it must be set to 6443
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## Upstream DNS servers
# upstream_dns_servers:
#   - 8.8.8.8
#   - 8.8.4.4

## There are some changes specific to the cloud providers,
## for instance we need to encapsulate packets with some network plugins.
## If set, the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used, make sure to source the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:
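## A minimal illustrative example (assumes the in-tree OpenStack provider; pick the
## value that matches your environment from the list above):
# cloud_provider: openstack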

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack' and 'vsphere'
## When openstack or vsphere is used, make sure to source the required fields
# external_cloud_provider:
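## A minimal illustrative example (assumes the external OpenStack cloud controller is wanted):
# cloud_provider: external
# external_cloud_provider: openstack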

## Set these proxy values in order to configure the package manager and the docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
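## Illustrative example only ("proxy.example.com:3128" is a placeholder endpoint, not a default):
# http_proxy: "http://proxy.example.com:3128"
# https_proxy: "http://proxy.example.com:3128"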

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over an https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of the get_url module. Note that kubespray will still perform checksum validation.
# download_validate_certs: False

## If you need to exclude other resources from the proxy in addition to all cluster nodes, add those resources here.
# additional_no_proxy: ""
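## Illustrative example only (".internal.example.com" is a placeholder domain suffix):
# additional_no_proxy: ".internal.example.com"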

## If you need to disable proxying of os package repositories but are still behind an http_proxy, set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special note for debian/ubuntu: you also have to set the no_proxy variable so that apt installs packages from the source you want
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour and include only master nodes in the
## no_proxy variable, set the variable below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" and "none"
## Note: vault has been removed
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set to true to download and cache container images
# download_container: true

## Deploy container engine
# Set to false if you want to deploy the container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set to false if your firewall blocks ICMP.
# ping_access_ip: true