# Commit 039205560a:
#   * nodelocaldns: allow a secondary pod for nodelocaldns for local-HA
#   * CI: add job to test nodelocaldns secondary
# 115 lines, 3.2 KiB, Django/Jinja template
---
# DaemonSet running the node-local DNS cache (node-cache) on every node.
# Rendered by Jinja2; `{% raw %}{% if %}{% endraw %}`-blocks are template logic,
# not YAML. Templated scalar values are quoted to guard against YAML
# implicit typing and empty-variable expansion; intOrString fields
# (container ports, probe ports, maxUnavailable) stay unquoted so they
# keep integer semantics after rendering.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '{{ nodelocaldns_prometheus_port }}'
    spec:
      nodeSelector:
        # Renders a full "key: value" mapping entry; must NOT be quoted.
        {{ nodelocaldns_ds_nodeselector }}
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      # Run on every node regardless of taints.
      tolerations:
        - effect: NoSchedule
          operator: "Exists"
        - effect: NoExecute
          operator: "Exists"
      containers:
        - name: node-cache
          image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
          resources:
            limits:
              memory: "{{ nodelocaldns_memory_limit }}"
            requests:
              cpu: "{{ nodelocaldns_cpu_requests }}"
              memory: "{{ nodelocaldns_memory_requests }}"
          args:
            - -localip
            - "{{ nodelocaldns_ip }}"
            - -conf
            - /etc/coredns/Corefile
            - -upstreamsvc
            - coredns
{% if enable_nodelocaldns_secondary %}
            # Secondary local-HA pod handles iptables teardown, so the
            # primary must not tear down its rules on exit; hostPorts are
            # also omitted because two pods cannot share them.
            - -skipteardown
{% else %}
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9253
              name: metrics
              protocol: TCP
{% endif %}
          securityContext:
            # Required to manage iptables/interface on the host.
            privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
          env:
            - name: MY_HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
{% endif %}
          livenessProbe:
            httpGet:
              host: "{{ nodelocaldns_ip }}"
              path: /health
              port: {{ nodelocaldns_health_port }}
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              host: "{{ nodelocaldns_ip }}"
              path: /health
              port: {{ nodelocaldns_health_port }}
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: config-volume
          configMap:
            name: nodelocaldns
            items:
              - key: Corefile
                path: Corefile
{% if dns_etchosts | default(None) %}
              - key: hosts
                path: hosts
{% endif %}
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
  updateStrategy:
    rollingUpdate:
      # intOrString: may render as an int or a percentage string.
      maxUnavailable: {{ serial | default('20%') }}
    type: RollingUpdate