# Commit 07cc981971:
# * Move front-proxy-client certs back to kube mount (we want the same CA for
#   all k8s certs)
# * Refactor vault to use a third-party module: adds idempotency and reduces
#   repetitive logic in the vault role. Requires ansible-modules-hashivault on
#   the ansible node and hvac on the vault hosts themselves.
# * Add upgrade test scenario; remove bootstrap-os tags from tasks
# * Fix upgrade issues; improve unseal logic; specify CA and fix etcd check
# * Fix initialization check; bump machine size
---
# Valid options: docker (default), rkt, or host
kubelet_deployment_type: host

# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1

# advertised host IP for kubelet. This affects network plugin config. Take caution
kubelet_address: "{{ ip | default(ansible_default_ipv4['address']) }}"

# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces
kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"

# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"

# Can be ipvs, iptables
kube_proxy_mode: iptables
# If using the pure iptables proxy, SNAT everything. Note that it breaks any
# policy engine.
kube_proxy_masquerade_all: false

# These options reflect limitations of running kubelet in a container.
# Modify at your own risk
kubelet_enable_cri: true
kubelet_cgroups_per_qos: true
# Set to empty to avoid cgroup creation
# (nested quotes are intentional: the flag value must be a literal "")
kubelet_enforce_node_allocatable: "\"\""

# Set false to enable sharing a pid namespace between containers in a pod.
# Note that PID namespace sharing requires docker >= 1.13.1.
kubelet_disable_shared_pid: true

### fail with swap on (default true)
kubelet_fail_swap_on: true
# Reserve this space for kube resources
kube_memory_reserved: 256M
kube_cpu_reserved: 100m
# Reservation for master hosts
kube_master_memory_reserved: 512M
kube_master_cpu_reserved: 200m

kubelet_status_update_frequency: 10s

# Limits for kube components and nginx load balancer app
kube_proxy_memory_limit: 2000M
kube_proxy_cpu_limit: 500m
kube_proxy_memory_requests: 64M
kube_proxy_cpu_requests: 150m
nginx_memory_limit: 512M
nginx_cpu_limit: 300m
nginx_memory_requests: 32M
nginx_cpu_requests: 25m
# kube_api_runtime_config:
#   - extensions/v1beta1/daemonsets=true
#   - extensions/v1beta1/deployments=true

nginx_image_repo: nginx
# Quoted so the tag stays the string "1.13" rather than the YAML float 1.13
nginx_image_tag: "1.13"

etcd_config_dir: /etc/ssl/etcd

kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volume-plugins

# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.
kube_apiserver_node_port_range: "30000-32767"

kubelet_load_modules: false

## Support custom flags to be passed to kubelet
kubelet_custom_flags: []

# This setting is used for rkt based kubelet for deploying hyperkube
# from a docker based registry ( controls --insecure and docker:// )
## Empty value for quay.io containers
## docker for docker registry containers
kube_hyperkube_image_repo: ""
# If non-empty, will use this string as identification instead of the actual hostname
# (empty on AWS so the cloud provider's node name is used instead)
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}

# cAdvisor port
kube_cadvisor_port: 0

# The read-only port for the Kubelet to serve on with no authentication/authorization.
kube_read_only_port: 0

# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. Per default these values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true)) }}"
openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"

# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"

vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_vnet_name:
# azure_route_table_name: