# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where the basic auth file is saved
kube_users_dir: "{{ kube_config_dir }}/users"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.11.2

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
    groups:
      - system:masters

## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
#kube_basic_auth: false
#kube_token_auth: false

## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:

# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico

# Weave deployment
# weave_password: ~
# weave_checkpoint_disable: false
# weave_conn_limit: 100
# weave_hairpin_mode: true
# weave_ipalloc_range: {{ kube_pods_subnet }}
# weave_expect_npc: {{ enable_network_policy }}
# weave_kube_peers: ~
# weave_ipalloc_init: ~
# weave_expose_ip: ~
# weave_metrics_addr: ~
# weave_status_addr: ~
# weave_mtu: 1376
# weave_no_masq_local: true
# weave_extra_args: ~

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
kube_network_node_prefix: 24
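## For reference, the sizing math behind the two settings above (a worked
## example, not extra configuration): splitting the /18 pods subnet into
## per-node /24 blocks gives 2^(24-18) = 64 node blocks, each with
## 2^(32-24) - 2 = 254 usable pod addresses. Widening the pods subnet or
## shrinking the node prefix trades node count against pods per node,
## e.g. (hypothetical values; must not overlap kube_service_addresses):
# kube_pods_subnet: 10.234.0.0/16
# kube_network_node_prefix: 25
## would leave room for 2^(25-16) = 512 nodes with 126 pods per node.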
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0 # (disabled)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: iptables

# Kube-proxy nodeport address.
# CIDR to bind nodeport services to. Sets the --nodeport-addresses flag on the kube-proxy manifest
kube_proxy_nodeport_addresses: false
# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false

# DNS configuration.
# Kubernetes cluster name, also used as the DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app (an HTTP service) to verify DNS resolution
deploy_netchecker: false
# IP address of the Kubernetes skydns service (10.233.0.3 with the default service CIDR)
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

# Container runtime
# docker for docker and crio for cri-o.
container_manager: docker

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
#docker_iptables_enabled: "true"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: >-
  --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
  {%- endif -%}

docker_bin_dir: "/usr/bin"

## If non-empty will override the default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for the system default
docker_mount_flags:

# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
helm_deployment_type: host

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# audit log for kubernetes
kubernetes_audit: false

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
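## The flag above only takes effect when RBAC is active, as its comment
## notes. A minimal sketch of turning pod security policies on (the
## authorization_modes variable is assumed to be defined elsewhere in this
## inventory's defaults; the values shown are illustrative, not defaults):
# authorization_modes: ['Node', 'RBAC']
# podsecuritypolicy_enabled: true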
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

# Monitoring apps for k8s
efk_enabled: false

# Helm deployment
helm_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: "{{ system_namespace }}"
# registry_storage_class: ""
# registry_disk_size: "10Gi"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
#   node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/kube-dns:53"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"

# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# dnsmasq
# dnsmasq_upstream_dns_servers:
#   - /resolvethiszone.with/10.0.4.250
#   - 8.8.8.8

# Enable creation of QoS cgroup hierarchy; if true, top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful, for example, to set up a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
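## Example: fetching admin credentials onto the machine that runs Ansible,
## using the two kubeconfig/kubectl options documented above (a sketch; the
## artifacts/admin.conf filename is an assumption based on the
## {{ inventory_dir }}/artifacts location noted in those comments):
# kubeconfig_localhost: true
# kubectl_localhost: true
## After a successful run, something like:
##   kubectl --kubeconfig {{ inventory_dir }}/artifacts/admin.conf get nodes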