Split group-variables

Antoine Legrand 2018-08-31 16:05:45 +02:00
parent 6204b85a37
commit e98ba9e839
11 changed files with 248 additions and 246 deletions
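
The commit breaks the monolithic group-vars file into per-topic files. The hunks below do not carry their file names, so the layout sketched here is only an inference from the variable groupings in each hunk; treat every name as illustrative rather than confirmed by the commit:

group_vars/
  all/
    all.yml        # bootstrap, directories, access_ip, LB, cloud provider, proxy
    azure.yml      # azure_* cloud-provider credentials
    coreos.yml     # coreos_auto_upgrade
    docker.yml     # docker daemon options and mount flags
    oci.yml        # Oracle Cloud Infrastructure variables
    openstack.yml  # Cinder and LBaaS variables
    etcd.yml       # etcd tuning and peer auth
  k8s-cluster/
    addons.yml       # dashboard, helm, registry, provisioners, ingress, cert-manager
    k8s-cluster.yml  # trimmed: docker and addon blocks moved out

The sample inventory is also touched, uncommenting the k8s-cluster:children group (last hunk).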

@@ -1,155 +0,0 @@
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
#access_ip: 1.1.1.1
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
#etcd_peer_client_auth: true
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true
## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443
### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
#kubelet_load_modules: false
## Internal network total size. This is the prefix of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18
## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
#peer_with_router: false
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_vnet_resource_group:
#azure_route_table_name:
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
#openstack_lbaas_use_octavia: False
#openstack_lbaas_method: "ROUND_ROBIN"
#openstack_lbaas_provider: "haproxy"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"
## When Oracle Cloud Infrastructure is used, set these variables
#oci_private_key:
#oci_region_id:
#oci_tenancy_id:
#oci_user_id:
#oci_user_fingerprint:
#oci_compartment_id:
#oci_vnc_id:
#oci_subnet1_id:
#oci_subnet2_id:
## Override these default behaviors if you wish
#oci_security_list_management: All
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
#oci_use_instance_principals: false
#oci_cloud_controller_version: 0.5.0
## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
## Set these proxy values in order to update package manager and docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
#no_proxy: ""
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2
# Uncomment this if you have more than 3 nameservers; then we'll only use the first 3.
#docker_dns_servers_strict: false
## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault"
#cert_management: script
# Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false
## Etcd auto compaction retention for mvcc key value store in hour
#etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
#etcd_metrics: basic
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
#etcd_memory_limit: "512M"
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
#etcd_quota_backend_bytes: "2G"
# The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
#kube_read_only_port: 10255
# Whether CoreOS should auto-upgrade; default is true
#coreos_auto_upgrade: true
# Set true to download and cache container
#download_container: true

@@ -0,0 +1,86 @@
## Valid bootstrap options (required): ubuntu, coreos, centos, none
## If the OS is not listed here, it means it doesn't require extra/bootstrap steps.
## For example, python is not available on 'coreos', so it must be installed before
## anything else. By contrast, Debian already has all of its dependencies fulfilled, so bootstrap_os should be set to `none`.
bootstrap_os: none
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd
## Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
#access_ip: 1.1.1.1
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true
## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443
### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
#kubelet_load_modules: false
## Internal network total size. This is the prefix of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18
## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
#peer_with_router: false
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
## Set these proxy values in order to update package manager and docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
#no_proxy: ""
## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault"
#cert_management: script
## Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
#kube_read_only_port: 10255
## Set true to download and cache container
#download_container: true
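
As a usage sketch, a deployment might uncomment only a handful of these defaults; the values below are placeholders for illustration, not values taken from the commit:

access_ip: 10.0.0.5
cloud_provider: openstack
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
kubelet_load_modules: true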

@@ -0,0 +1,14 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_vnet_resource_group:
#azure_route_table_name:

@@ -0,0 +1,2 @@
## Whether CoreOS should auto-upgrade; default is true
#coreos_auto_upgrade: true

@@ -0,0 +1,35 @@
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2
## Uncomment this if you have more than 3 nameservers; then we'll only use the first 3.
#docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
#docker_iptables_enabled: "true"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: >-
--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
{%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
{%- endif -%}
docker_bin_dir: "/usr/bin"
## If non-empty, will override the default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
docker_mount_flags:
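
For orientation, on a typical x86_64 node the Jinja block above collapses to a single flag string. Assuming the stock kubespray defaults for kube_service_addresses and docker_log_opts, which are defined elsewhere and not shown in this commit (an assumption), the rendered value would look roughly like:

## docker_options, rendered (approximate):
#   --insecure-registry=10.233.0.0/18 --graph=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5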

@@ -0,0 +1,15 @@
## When Oracle Cloud Infrastructure is used, set these variables
#oci_private_key:
#oci_region_id:
#oci_tenancy_id:
#oci_user_id:
#oci_user_fingerprint:
#oci_compartment_id:
#oci_vnc_id:
#oci_subnet1_id:
#oci_subnet2_id:
## Override these default behaviors if you wish
#oci_security_list_management: All
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
#oci_use_instance_principals: false
#oci_cloud_controller_version: 0.5.0

@@ -0,0 +1,15 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
#openstack_lbaas_use_octavia: False
#openstack_lbaas_method: "ROUND_ROBIN"
#openstack_lbaas_provider: "haproxy"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

@@ -0,0 +1,18 @@
## Etcd auto compaction retention for mvcc key value store in hour
#etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
#etcd_metrics: basic
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
#etcd_memory_limit: "512M"
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
#etcd_quota_backend_bytes: "2G"
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
#etcd_peer_client_auth: true
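
Tying the two limits together: keep etcd_quota_backend_bytes at or below etcd_memory_limit, or lift the memory cap entirely. A sketch for etcd nodes with ample RAM (illustrative values, not defaults from this commit):

etcd_memory_limit: "0"            # unrestricted, per the note above
etcd_quota_backend_bytes: "2G"    # space quota now safely below the (unlimited) memory cap
etcd_compaction_retention: "8"
etcd_metrics: extensive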

@@ -0,0 +1,54 @@
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false
# Helm deployment
helm_enabled: false
# Registry deployment
registry_enabled: false
# registry_namespace: "{{ system_namespace }}"
# registry_storage_class: ""
# registry_disk_size: "10Gi"
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
# node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
# map-hash-bucket-size: "128"
# ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/kube-dns:53"
# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"

@@ -135,38 +135,11 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Container runtime
# docker for docker and crio for cri-o.
## Container runtime
## docker for docker and crio for cri-o.
container_manager: docker
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
#docker_iptables_enabled: "true"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: >-
--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
{%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
{%- endif -%}
docker_bin_dir: "/usr/bin"
## If non-empty, will override the default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
docker_mount_flags:
# Settings for containerized control plane (etcd/kubelet/secrets)
## Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
@@ -181,64 +154,6 @@ kubernetes_audit: false
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false
# Helm deployment
helm_enabled: false
# Registry deployment
registry_enabled: false
# registry_namespace: "{{ system_namespace }}"
# registry_storage_class: ""
# registry_disk_size: "10Gi"
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
# node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
# map-hash-bucket-size: "128"
# ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/kube-dns:53"
# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
@@ -264,3 +179,6 @@ persistent_volumes_enabled: false
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false

@@ -27,6 +27,6 @@
# node5
# node6
# [k8s-cluster:children]
# kube-master
# kube-node
[k8s-cluster:children]
kube-master
kube-node
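
The last hunk uncomments the k8s-cluster:children group in the sample inventory. A minimal inventory built around it might look like this; the hostnames and addresses are placeholders, not part of the commit:

[kube-master]
node1 ansible_host=10.0.0.1

[etcd]
node1 ansible_host=10.0.0.1

[kube-node]
node2 ansible_host=10.0.0.2
node3 ansible_host=10.0.0.3

[k8s-cluster:children]
kube-master
kube-node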