Merge pull request #518 from bogdando/issues/516
Allow subdomains of dns_domain and fix kubelet restarts
commit c96a9bfdfd
7 changed files with 63 additions and 36 deletions
@@ -7,7 +7,7 @@ to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).

Note, additional search (sub)domains may be defined in the ``searchdomains``
var. And additional recursive DNS resolvers in the ``upstream_dns_servers``,
and ``ndots`` vars. Additional recursive DNS resolvers may be defined in the ``upstream_dns_servers``,
``nameservers`` vars. Intranet DNS resolvers should be specified in the first
place, followed by external resolvers, for example:

@@ -21,17 +21,10 @@
  skip_dnsmasq: false
  upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
```
The vars are explained below as well.

Remember the limitations (the vars are explained below):

* the ``searchdomains`` list is limited to 6 names and 256 chars in
  total. Due to the default ``svc, default.svc`` subdomains, the actual
  limits are 4 names and 239 chars respectively.
* the ``nameservers`` list is limited to 3 servers, although there
  is a way to mitigate that with the ``upstream_dns_servers``,
  see below. In any case, ``nameservers`` can take no more than two
  custom DNS servers, because one slot is reserved for the Kubernetes
  cluster's needs.
DNS configuration details
-------------------------

Here is an approximate picture of how DNS is set up and
configured by the Kargo ansible playbooks:

@@ -73,7 +66,27 @@ Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
and will be merged together with the ``skydns_server`` IP into the hosts'
``/etc/resolv.conf``.

Kargo does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
Limitations
-----------

* Kargo does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
  not answer with authority to arbitrary recursive resolvers. This task is left
  for the future. See the [official SkyDns docs](https://github.com/skynetservices/skydns)
  for details.

* There is
  [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
  for the SkyDNS ``ndots`` param via an
  [option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
  add-on, even though SkyDNS supports it. Thus, DNS SRV records may not work
  as expected, as they require ``ndots:7``.

* the ``searchdomains`` list is limited to 6 names and 256 chars in
  total. Due to the default ``svc, default.svc`` subdomains, the actual
  limits are 4 names and 239 chars respectively.

* the ``nameservers`` list is limited to 3 servers, although there
  is a way to mitigate that with the ``upstream_dns_servers``,
  see below. In any case, ``nameservers`` can take no more than two
  custom DNS servers, because one slot is reserved for the Kubernetes
  cluster's needs.

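As a rough illustration of the vars and limits described above, an inventory could set them like this (a sketch only; the intranet domain names are placeholders and not part of this change, while the resolver IPs reuse the example from the doc):

```yaml
# Hypothetical inventory group_vars sketch, staying within the documented
# limits: at most 4 custom searchdomains and at most 2 custom nameservers.
searchdomains:
  - infra.example.com   # placeholder intranet search domain
  - corp.example.com    # placeholder intranet search domain
nameservers:
  - 172.18.32.6         # intranet resolver goes first
upstream_dns_servers:   # additional recursive resolvers, intranet before external
  - 172.18.32.7
  - 8.8.8.8
ndots: 5                # covers the default svc, default.svc subdomain depth
skip_dnsmasq: false
```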
@@ -33,6 +33,8 @@ kube_users:

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
ndots: 5

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are

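For context, ``ndots`` maps to the resolver's ``options ndots:N``: a name with fewer than N dots is first expanded with the search suffixes (the default ``svc``, ``default.svc`` subdomains of ``dns_domain``, plus any ``searchdomains``) before being sent upstream, which is what lets subdomains of the DNS domain resolve via ``/etc/resolv.conf``. A hypothetical per-inventory override (the value 6 is illustrative, not part of this diff):

```yaml
# Hypothetical override in an inventory's group_vars: raise ndots so that
# names with up to 5 dots still try the search suffixes first.
cluster_name: cluster.local
ndots: 6
```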
@@ -17,5 +17,18 @@
  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"

- name: Dnsmasq | update resolvconf
  command: /bin/true
  notify:
    - Dnsmasq | reload resolvconf
    - Dnsmasq | reload kubelet

- name: Dnsmasq | reload resolvconf
  command: /sbin/resolvconf -u
  ignore_errors: true

- name: Dnsmasq | reload kubelet
  service:
    name: kubelet
    state: restarted
  when: "{{ inventory_hostname in groups['kube-master'] }}"
  ignore_errors: true

@@ -72,6 +72,7 @@
    backup: yes
    follow: yes
  with_items:
    - ndots:{{ ndots }}
    - timeout:2
    - attempts:2
  notify: Dnsmasq | update resolvconf

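The hunk above only shows the tail of the task that writes these resolver options. A rough reconstruction of what such a task could look like (assumption: a ``lineinfile``-style module writing ``options`` lines into ``/etc/resolv.conf``; the task name and module arguments are illustrative, not taken from this diff):

```yaml
# Illustrative sketch (not the repo's exact task): write one resolver
# "options" line per item and let the handler chain reload resolvconf
# and restart kubelet on the masters.
- name: Dnsmasq | add resolver options (hypothetical)
  lineinfile:
    dest: /etc/resolv.conf
    line: "options {{ item }}"
    regexp: "^options {{ item.split(':')[0] }}"
    backup: yes
  with_items:
    - "ndots:{{ ndots }}"
    - "timeout:2"
    - "attempts:2"
  notify: Dnsmasq | update resolvconf
```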
@@ -21,7 +21,7 @@ spec:
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.6
        image: gcr.io/google_containers/kubedns-amd64:1.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in

@@ -4,12 +4,14 @@
  notify:
    - Master | reload systemd
    - Master | reload kubelet
    - Master | wait for master static pods

- name: wait for master static pods
- name: Master | wait for master static pods
  command: /bin/true
  notify:
    - wait for kube-scheduler
    - wait for kube-controller-manager
    - Master | wait for the apiserver to be running
    - Master | wait for kube-scheduler
    - Master | wait for kube-controller-manager

- name: Master | reload systemd
  command: systemctl daemon-reload

@@ -20,16 +22,23 @@
    name: kubelet
    state: restarted

- name: wait for kube-scheduler
- name: Master | wait for kube-scheduler
  uri: url=http://localhost:10251/healthz
  register: scheduler_result
  until: scheduler_result.status == 200
  retries: 15
  delay: 5

- name: wait for kube-controller-manager
- name: Master | wait for kube-controller-manager
  uri: url=http://localhost:10252/healthz
  register: controller_manager_result
  until: controller_manager_result.status == 200
  retries: 15
  delay: 5

- name: Master | wait for the apiserver to be running
  uri: url=http://localhost:8080/healthz
  register: result
  until: result.status == 200
  retries: 10
  delay: 6

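With these handlers in place, a task that rewrites a master static pod manifest only needs to notify ``Master | wait for master static pods``, and the chain fans out to the individual health checks. A minimal sketch (the task name and manifest paths are illustrative, not taken from this diff):

```yaml
# Illustrative task (hypothetical name and paths): changing any master static
# pod manifest triggers the chained "wait for" handlers defined above.
- name: Master | write example static pod manifest (hypothetical)
  template:
    src: manifests/example.manifest.j2
    dest: "{{ kube_manifest_dir }}/example.manifest"
  notify: Master | wait for master static pods
```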
@@ -19,17 +19,9 @@
  template:
    src: manifests/kube-apiserver.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
  register: apiserver_manifest
  notify: Master | restart kubelet

- name: wait for the apiserver to be running
  uri: url=http://localhost:8080/healthz
  register: result
  until: result.status == 200
  retries: 10
  delay: 6

  notify: Master | wait for the apiserver to be running

- meta: flush_handlers
# Create kube-system namespace
- name: copy 'kube-system' namespace manifest
  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml

@@ -43,7 +35,6 @@
  failed_when: False
  run_once: yes


- name: Create 'kube-system' namespace
  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
  changed_when: False

@@ -54,12 +45,10 @@
  template:
    src: manifests/kube-controller-manager.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
  notify: wait for kube-controller-manager
  notify: Master | wait for kube-controller-manager

- name: Write kube-scheduler manifest
  template:
    src: manifests/kube-scheduler.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
  notify: wait for kube-scheduler

- meta: flush_handlers
  notify: Master | wait for kube-scheduler