2017-02-24 09:54:25 +00:00
|
|
|
# Kubernetes configuration dirs and system namespace.
|
|
|
|
# Those are where all the additional config stuff goes
|
|
|
|
# the kubernetes normally puts in /srv/kubernetes.
|
|
|
|
# This puts them in a sane location and namespace.
|
|
|
|
# Editing those values will almost surely break something.
|
|
|
|
kube_config_dir: /etc/kubernetes
|
|
|
|
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
|
|
|
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
|
|
|
system_namespace: kube-system
|
|
|
|
|
|
|
|
# This is where all the cert scripts and certs will be located
|
|
|
|
kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
|
|
|
|
|
|
|
# This is where all of the bearer tokens will be stored
|
|
|
|
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
|
|
|
|
|
|
|
# This is where to save basic auth file
|
|
|
|
kube_users_dir: "{{ kube_config_dir }}/users"
|
|
|
|
|
2017-11-06 20:01:10 +00:00
|
|
|
kube_api_anonymous_auth: true
|
2017-02-24 09:54:25 +00:00
|
|
|
|
|
|
|
## Change this to use another Kubernetes version, e.g. a current beta release
|
2018-01-27 15:39:54 +00:00
|
|
|
kube_version: v1.9.2
|
2017-02-24 09:54:25 +00:00
|
|
|
|
|
|
|
# Where the binaries will be downloaded.
|
|
|
|
# Note: ensure that you've enough disk space (about 1G)
|
|
|
|
local_release_dir: "/tmp/releases"
|
|
|
|
# Random shifts for retrying failed ops like pushing/downloading
|
|
|
|
retry_stagger: 5
|
|
|
|
|
|
|
|
# This is the group that the cert creation scripts chgrp the
|
|
|
|
# cert files to. Not really changeable...
|
|
|
|
kube_cert_group: kube-cert
|
|
|
|
|
|
|
|
# Cluster Loglevel configuration
|
|
|
|
kube_log_level: 2
|
|
|
|
|
|
|
|
# Users to create for basic auth in Kubernetes API via HTTP
|
2017-07-14 13:27:20 +00:00
|
|
|
# Optionally add groups for user
|
2018-02-01 06:42:34 +00:00
|
|
|
# Password for the default "kube" basic-auth user. Generated once by the
# Ansible password lookup and persisted under <inventory_dir>/credentials/
# so subsequent runs reuse the same value.
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
# Users to create for basic auth in the Kubernetes API via HTTP.
# Each entry maps a username to its password, API role, and (optionally)
# the groups the user belongs to.
kube_users:
  kube:
    pass: "{{ kube_api_pwd }}"
    role: admin
    groups:
      - system:masters
|
2017-02-27 13:15:50 +00:00
|
|
|
|
|
|
|
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
|
|
|
#kube_oidc_auth: false
|
2017-10-15 19:41:17 +00:00
|
|
|
#kube_basic_auth: false
|
|
|
|
#kube_token_auth: false
|
2017-02-27 13:15:50 +00:00
|
|
|
|
|
|
|
|
2017-02-27 12:24:21 +00:00
|
|
|
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
|
|
|
|
## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)
|
2017-02-27 13:15:50 +00:00
|
|
|
|
2017-02-27 12:24:21 +00:00
|
|
|
# kube_oidc_url: https:// ...
|
|
|
|
# kube_oidc_client_id: kubernetes
|
|
|
|
## Optional settings for OIDC
|
|
|
|
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
|
|
|
|
# kube_oidc_username_claim: sub
|
|
|
|
# kube_oidc_groups_claim: groups
|
|
|
|
|
|
|
|
|
contiv network support (#1914)
* Add Contiv support
Contiv is a network plugin for Kubernetes and Docker. It supports
vlan/vxlan/BGP/Cisco ACI technologies. It support firewall policies,
multiple networks and bridging pods onto physical networks.
* Update contiv version to 1.1.4
Update contiv version to 1.1.4 and added SVC_SUBNET in contiv-config.
* Load openvswitch module to workaround on CentOS7.4
* Set contiv cni version to 0.1.0
Correct contiv CNI version to 0.1.0.
* Use kube_apiserver_endpoint for K8S_API_SERVER
Use kube_apiserver_endpoint as K8S_API_SERVER to make contiv talks
to a available endpoint no matter if there's a loadbalancer or not.
* Make contiv use its own etcd
Before this commit, contiv is using a etcd proxy mode to k8s etcd,
this work fine when the etcd hosts are co-located with contiv etcd
proxy, however the k8s peering certs are only in etcd group, as a
result the etcd-proxy is not able to peering with the k8s etcd on
etcd group, plus the netplugin is always trying to find the etcd
endpoint on localhost, this will cause problem for all netplugins
not running on etcd group nodes.
This commit make contiv uses its own etcd, separate from k8s one.
on kube-master nodes (where net-master runs), it will run as leader
mode and on all rest nodes it will run as proxy mode.
* Use cp instead of rsync to copy cni binaries
Since rsync has been removed from hyperkube, this commit changes it
to use cp instead.
* Make contiv-etcd able to run on master nodes
* Add rbac_enabled flag for contiv pods
* Add contiv into CNI network plugin lists
* migrate contiv test to tests/files
Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com>
* Add required rules for contiv netplugin
* Better handling json return of fwdMode
* Make contiv etcd port configurable
* Use default var instead of templating
* roles/download/defaults/main.yml: use contiv 1.1.7
Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com>
2017-11-29 14:24:16 +00:00
|
|
|
# Choose network plugin (calico, contiv, weave or flannel)
|
2017-02-24 09:54:25 +00:00
|
|
|
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
|
|
|
kube_network_plugin: calico
|
|
|
|
|
2017-07-26 16:09:34 +00:00
|
|
|
# weave's network password for encryption
|
|
|
|
# if null then no network encryption
|
|
|
|
# you can use --extra-vars to pass the password in command line
|
|
|
|
weave_password: EnterPasswordHere
|
|
|
|
|
|
|
|
# Weave uses consensus mode by default
|
|
|
|
# Enabling seed mode allow to dynamically add or remove hosts
|
|
|
|
# https://www.weave.works/docs/net/latest/ipam/
|
|
|
|
weave_mode_seed: false
|
|
|
|
|
|
|
|
# These two variables are automatically changed by the Weave role; do not change them manually
|
|
|
|
# To reset values :
|
|
|
|
# weave_seed: uninitialized
|
|
|
|
# weave_peers: uninitialized
|
|
|
|
weave_seed: uninitialized
|
|
|
|
weave_peers: uninitialized
|
2017-06-20 12:50:08 +00:00
|
|
|
|
2018-02-05 09:17:21 +00:00
|
|
|
# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
|
|
|
|
weave_mtu: 1376
|
|
|
|
|
2017-03-13 15:04:31 +00:00
|
|
|
# Enable kubernetes network policies
|
|
|
|
enable_network_policy: false
|
|
|
|
|
2017-02-24 09:54:25 +00:00
|
|
|
# Kubernetes internal network for services, unused block of space.
|
|
|
|
kube_service_addresses: 10.233.0.0/18
|
|
|
|
|
|
|
|
# internal network. When used, it will assign IP
|
|
|
|
# addresses from this range to individual pods.
|
|
|
|
# This network must be unused in your network infrastructure!
|
|
|
|
kube_pods_subnet: 10.233.64.0/18
|
|
|
|
|
|
|
|
# internal network node size allocation (optional). This is the size allocated
|
|
|
|
# to each node on your network. With these defaults you should have
|
|
|
|
# room for 4096 nodes with 254 pods per node.
|
|
|
|
kube_network_node_prefix: 24
|
|
|
|
|
|
|
|
# The port the API Server will be listening on.
|
|
|
|
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
2017-02-24 14:58:54 +00:00
|
|
|
kube_apiserver_port: 6443 # (https)
|
2017-02-24 09:54:25 +00:00
|
|
|
kube_apiserver_insecure_port: 8080 # (http)
|
2017-11-06 20:01:10 +00:00
|
|
|
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
|
|
|
|
#kube_apiserver_insecure_port: 0 # (disabled)
|
2017-02-24 09:54:25 +00:00
|
|
|
|
2018-01-29 05:15:32 +00:00
|
|
|
# Kube-proxy proxyMode configuration.
|
|
|
|
# Can be ipvs, iptables
|
|
|
|
kube_proxy_mode: iptables
|
|
|
|
|
2017-02-24 09:54:25 +00:00
|
|
|
# DNS configuration.
|
|
|
|
# Kubernetes cluster name, also will be used as DNS domain
|
|
|
|
cluster_name: cluster.local
|
|
|
|
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
|
|
|
ndots: 2
|
2018-01-23 13:28:42 +00:00
|
|
|
# Can be dnsmasq_kubedns, kubedns, manual or none
|
2017-05-12 19:57:24 +00:00
|
|
|
dns_mode: kubedns
|
2018-01-23 13:28:42 +00:00
|
|
|
# Set manual server if using a custom cluster DNS server
|
|
|
|
#manual_dns_server: 10.x.x.x
|
|
|
|
|
2017-02-24 09:54:25 +00:00
|
|
|
# Can be docker_dns, host_resolvconf or none
|
|
|
|
resolvconf_mode: docker_dns
|
|
|
|
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
|
|
|
deploy_netchecker: false
|
|
|
|
# Ip address of the kubernetes skydns service
|
|
|
|
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
2017-10-11 19:40:21 +00:00
|
|
|
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
2017-02-24 09:54:25 +00:00
|
|
|
dns_domain: "{{ cluster_name }}"
|
|
|
|
|
|
|
|
# Path used to store Docker data
|
|
|
|
docker_daemon_graph: "/var/lib/docker"
|
|
|
|
|
|
|
|
## A string of extra options to pass to the docker daemon.
|
|
|
|
## This string should be exactly as you wish it to appear.
|
|
|
|
## An obvious use case is allowing insecure-registry access
|
|
|
|
## to self hosted registries like so:
|
2017-07-07 10:39:42 +00:00
|
|
|
|
|
|
|
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
|
2017-02-24 09:54:25 +00:00
|
|
|
docker_bin_dir: "/usr/bin"
|
|
|
|
|
|
|
|
# Settings for containerized control plane (etcd/kubelet/secrets)
|
|
|
|
etcd_deployment_type: docker
|
2017-08-21 07:53:49 +00:00
|
|
|
kubelet_deployment_type: host
|
2017-02-24 09:54:25 +00:00
|
|
|
vault_deployment_type: docker
|
2017-11-29 19:52:54 +00:00
|
|
|
helm_deployment_type: host
|
2017-02-24 09:54:25 +00:00
|
|
|
|
|
|
|
# K8s image pull policy (imagePullPolicy)
|
|
|
|
k8s_image_pull_policy: IfNotPresent
|
|
|
|
|
2017-11-09 21:59:30 +00:00
|
|
|
# Kubernetes dashboard
|
|
|
|
# RBAC required. see docs/getting-started.md for access details.
|
2017-09-09 20:38:03 +00:00
|
|
|
dashboard_enabled: true
|
|
|
|
|
2017-02-24 09:54:25 +00:00
|
|
|
# Monitoring apps for k8s
|
|
|
|
efk_enabled: false
|
2017-03-17 11:56:25 +00:00
|
|
|
|
|
|
|
# Helm deployment
|
|
|
|
helm_enabled: false
|
2017-07-19 14:57:36 +00:00
|
|
|
|
2017-11-01 14:25:35 +00:00
|
|
|
# Istio deployment
|
2017-10-13 14:42:54 +00:00
|
|
|
istio_enabled: false
|
|
|
|
|
2018-02-04 04:32:33 +00:00
|
|
|
# Registry deployment
|
|
|
|
registry_enabled: false
|
|
|
|
|
2017-11-01 14:25:35 +00:00
|
|
|
# Local volume provisioner deployment
|
2018-02-07 06:58:50 +00:00
|
|
|
# deprecated will be removed
|
|
|
|
local_volumes_enabled: false
|
2018-02-07 08:24:00 +00:00
|
|
|
local_volume_provisioner_enabled: "{{ local_volumes_enabled }}"
|
2017-11-01 14:25:35 +00:00
|
|
|
|
2018-02-01 04:25:21 +00:00
|
|
|
# CephFS provisioner deployment
|
|
|
|
cephfs_provisioner_enabled: false
|
|
|
|
# cephfs_provisioner_namespace: "{{ system_namespace }}"
|
|
|
|
# cephfs_provisioner_cluster: ceph
|
|
|
|
# cephfs_provisioner_monitors:
|
|
|
|
# - 172.24.0.1:6789
|
|
|
|
# - 172.24.0.2:6789
|
|
|
|
# - 172.24.0.3:6789
|
|
|
|
# cephfs_provisioner_admin_id: admin
|
|
|
|
# cephfs_provisioner_secret: secret
|
|
|
|
|
2017-12-19 14:47:00 +00:00
|
|
|
# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
|
|
|
|
persistent_volumes_enabled: false
|
|
|
|
|
2017-09-18 12:30:57 +00:00
|
|
|
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
|
|
|
|
# kubeconfig_localhost: false
|
|
|
|
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
|
|
|
|
# kubectl_localhost: false
|
|
|
|
|
2017-07-19 14:57:36 +00:00
|
|
|
# dnsmasq
|
|
|
|
# dnsmasq_upstream_dns_servers:
|
|
|
|
# - /resolvethiszone.with/10.0.4.250
|
|
|
|
# - 8.8.8.8
|
|
|
|
|
|
|
|
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
|
|
|
|
# kubelet_cgroups_per_qos: true
|
|
|
|
|
|
|
|
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
2018-01-17 15:42:27 +00:00
|
|
|
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
2017-07-19 14:57:36 +00:00
|
|
|
# kubelet_enforce_node_allocatable: pods
|
2017-10-17 10:06:07 +00:00
|
|
|
|
|
|
|
## Supplementary addresses that can be added in kubernetes ssl keys.
|
2018-01-17 15:42:27 +00:00
|
|
|
## That can be useful for example to setup a keepalived virtual IP
|
2017-10-17 10:06:07 +00:00
|
|
|
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
|
2018-01-18 10:55:23 +00:00
|
|
|
|
|
|
|
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
|
|
|
|
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
|
|
|
|
## Set this variable to true to get rid of this issue
|
2018-01-23 13:14:00 +00:00
|
|
|
volume_cross_zone_attachment: false
|