a15d626771
In order to enable offline/intranet installation cases:

* Move DNS/resolvconf configuration to the preinstall role. Remove the skip_dnsmasq_k8s var as it is no longer needed.
* Preconfigure the DNS stack early, which may be required when artifacts are downloaded from intranet repositories. Do not configure K8s DNS resolvers in the hosts' /etc/resolv.conf at this early stage (as they may not exist yet).
* Reconfigure K8s DNS resolvers for hosts only after kubedns/dnsmasq has been set up and before K8s apps are created.
* Move the docker install task to an early stage as well and unbind it from the etcd role's specific install path. Fix the external flannel dependency on docker role handlers. Also fix the ordering of the docker restart handlers' steps to match the expected sequence (the socket, then the service); see the sketch below.
* Add a default resolver fact, which is cloud provider specific, and remove the hardcoded GCE resolver.
* Reduce the default ndots for the hosts' /etc/resolv.conf to 2. Multiple search domains combined with high ndots values lead to poor DNS stack performance and make ansible workers fail very often with the "Timeout (12s) waiting for privilege escalation prompt:" error.
* Update docs.

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
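Below is a minimal, illustrative sketch of how the docker restart handlers could be chained so that the socket unit is restarted before the service, as described above. It assumes Ansible's standard handler-notify mechanism; the handler and unit names are examples, not necessarily the ones used in this repository.

- name: restart docker
  # no-op trigger handler: its only job is to notify the ordered steps below
  command: /bin/true
  notify:
    - Docker | reload systemd
    - Docker | restart docker.socket
    - Docker | restart docker.service

- name: Docker | reload systemd
  command: systemctl daemon-reload

- name: Docker | restart docker.socket
  # restart the socket unit first ...
  service:
    name: docker.socket
    state: restarted

- name: Docker | restart docker.service
  # ... then the service itself
  service:
    name: docker
    state: restarted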
54 lines
1.8 KiB
YAML
---
run_gitinfos: false

# This directory is where all the additional scripts go
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"

# This directory is where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes

# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"

# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"

common_required_pkgs:
  - python-httplib2
  - openssl
  - curl
  - rsync
  - bash-completion
  - socat

# Set to true if your network does not support IPv6
# This may be necessary for pulling Docker images from
# the GCE docker repository
disable_ipv6_dns: false

# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. By default these values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID') }}"

# All clients access each node individually, instead of using a load balancer.
etcd_multiaccess: true

# CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
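The kube_manifest_dir above ({{ kube_config_dir }}/manifests, i.e. /etc/kubernetes/manifests by default) is the kubelet's static pod path: any pod definition dropped there is started by the kubelet itself on startup, without going through the API server. A minimal sketch of such a manifest, with an illustrative pod name and image not taken from this repository:

apiVersion: v1
kind: Pod
metadata:
  name: static-web        # illustrative name
  namespace: kube-system
spec:
  containers:
    - name: web
      image: nginx:1.11   # illustrative image and tag
      ports:
        - containerPort: 80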