# Snapshot metadata: commit e40368ae2b — 234 lines, 8.2 KiB, YAML
# Commit message:
# Added CoreDNS to downloads. Updated with labels. Should now work without RBAC too.
# Fix DNS settings on hosts. Rename CoreDNS service from kube-dns to coredns.
# Add rotate based on http://edgeofsanity.net/rant/2017/12/20/systemd-resolved-is-broken.html
# Updated docs with CoreDNS info. Added labels and fixed minor settings from official yaml file:
# https://github.com/kubernetes/kubernetes/blob/release-1.9/cluster/addons/dns/coredns.yaml.sed
# Added a secondary deployment and secondary service ip. This is to mitigate dns timeouts and
# create high resiliency for failures. See discussion at
# https://github.com/coreos/coreos-kubernetes/issues/641#issuecomment-281174806
# Set dns list correct. Thanks to @whereismyjetpack.
# Only download KubeDNS or CoreDNS if selected. Move dns cleanup to its own file and import
# tasks based on dns mode. Fix install of KubeDNS when dnsmask_kubedns mode is selected.
# Add new dns option coredns_dual for dual stack deployment. Added variable to configure
# replicas deployed. Updated docs for dual stack deployment. Removed rotate option in
# resolv.conf. Run DNS manifests for CoreDNS and KubeDNS. Set skydns servers on dual stack
# deployment. Use only one template for CoreDNS dual deployment. Set correct cluster ip for
# the dns server.
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.3

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
    groups:
      - system:masters

## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
#kube_basic_auth: false
#kube_token_auth: false

## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to additionally deploy an OpenID Provider (e.g Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_groups_claim: groups

# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

# weave's network password for encryption
# if null then no network encryption
# you can use --extra-vars to pass the password in command line
weave_password: EnterPasswordHere

# Weave uses consensus mode by default
# Enabling seed mode allows to dynamically add or remove hosts
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false

# These two variables are automatically changed by the weave's role, do not manually change these values
# To reset values:
# weave_seed: uninitialized
# weave_peers: uninitialized
weave_seed: uninitialized
weave_peers: uninitialized

# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
weave_mtu: 1376

# Enable kubernetes network policies
enable_network_policy: false

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
kube_apiserver_insecure_port: 8080  # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0  # (disabled)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: iptables

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false

# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:

docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
docker_bin_dir: "/usr/bin"

# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
helm_deployment_type: host

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

# Monitoring apps for k8s
efk_enabled: false

# Helm deployment
helm_enabled: false

# Istio deployment
istio_enabled: false

# Registry deployment
registry_enabled: false

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "{{ system_namespace }}"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors:
#   - 172.24.0.1:6789
#   - 172.24.0.2:6789
#   - 172.24.0.3:6789
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/kube-dns:53"

# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
persistent_volumes_enabled: false

# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
# kubectl_localhost: false

# dnsmasq
# dnsmasq_upstream_dns_servers:
#   - /resolvethiszone.with/10.0.4.250
#   - 8.8.8.8

# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false