Adding support for Prometheus
commit 9913e1a774 (parent 21d3d75827)
21 changed files with 3993 additions and 0 deletions
@@ -91,6 +91,20 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"
+prometheus_operator_image_repo: "quay.io/coreos/prometheus-operator"
+prometheus_operator_image_tag: "v0.3.0"
+node_exporter_image_repo: "quay.io/prometheus/node-exporter"
+node_exporter_image_tag: "v0.13.0"
+kube_state_metrics_image_repo: "gcr.io/google_containers/kube-state-metrics"
+kube_state_metrics_image_tag: "v0.3.0"
+alertmanager_image_repo: "quay.io/prometheus/alertmanager"
+alertmanager_image_tag: "v0.5.1"
+grafana_image_repo: "grafana/grafana"
+grafana_image_tag: "3.1.1"
+grafana_watcher_image_repo: "quay.io/coreos/grafana-watcher"
+grafana_watcher_image_tag: "latest"
+prometheus_image_repo: "quay.io/prometheus/prometheus"
+prometheus_image_tag: "v1.5.1"

 downloads:
   netcheck_server:
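The component versions above are ordinary role defaults, so they can be pinned or bumped from the inventory instead of by editing the role. A minimal sketch, assuming the usual group_vars layout (the file path is an assumption; any group_vars scope that applies to the cluster works, and any published tag can be substituted):

  # inventory/group_vars/k8s-cluster.yml (hypothetical location)
  prometheus_image_tag: "v1.5.2"        # pin a newer Prometheus release
  grafana_watcher_image_tag: "vX.Y.Z"   # replace the floating "latest" tag with a pinned release of your choice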
@@ -221,6 +235,42 @@ downloads:
     repo: "{{ kibana_image_repo }}"
     tag: "{{ kibana_image_tag }}"
     sha256: "{{ kibana_digest_checksum|default(None) }}"
+  prometheus_operator:
+    container: true
+    repo: "{{ prometheus_operator_image_repo }}"
+    tag: "{{ prometheus_operator_image_tag }}"
+    sha256: "{{ prometheus_operator_digest_checksum|default(None) }}"
+  node_exporter:
+    container: true
+    repo: "{{ node_exporter_image_repo }}"
+    tag: "{{ node_exporter_image_tag }}"
+    sha256: "{{ node_exporter_digest_checksum|default(None) }}"
+  kube_state_metrics:
+    container: true
+    repo: "{{ kube_state_metrics_image_repo }}"
+    tag: "{{ kube_state_metrics_image_tag }}"
+    sha256: "{{ kube_state_metrics_digest_checksum|default(None) }}"
+  alertmanager:
+    container: true
+    repo: "{{ alertmanager_image_repo }}"
+    tag: "{{ alertmanager_image_tag }}"
+    sha256: "{{ alertmanager_digest_checksum|default(None) }}"
+  grafana:
+    container: true
+    repo: "{{ grafana_image_repo }}"
+    tag: "{{ grafana_image_tag }}"
+    sha256: "{{ grafana_digest_checksum|default(None) }}"
+  grafana_watcher:
+    container: true
+    repo: "{{ grafana_watcher_image_repo }}"
+    tag: "{{ grafana_watcher_image_tag }}"
+    sha256: "{{ grafana_watcher_digest_checksum|default(None) }}"
+  prometheus:
+    container: true
+    repo: "{{ prometheus_image_repo }}"
+    tag: "{{ prometheus_image_tag }}"
+    sha256: "{{ prometheus_digest_checksum|default(None) }}"
+
 download:
   container: "{{ file.container|default('false') }}"
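With the image defaults above, each new downloads entry expands to plain image coordinates for the download role. As a rough illustration, the prometheus entry resolves to something like the following (the sha256 field is only meaningful if a prometheus_digest_checksum variable is defined; by default it stays effectively empty):

  prometheus:
    container: true
    repo: "quay.io/prometheus/prometheus"
    tag: "v1.5.1"
    sha256: ""   # populated only when prometheus_digest_checksum is supplied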
@@ -17,3 +17,6 @@ dependencies:
   - role: kubernetes-apps/efk
     when: efk_enabled
     tags: [ apps, efk ]
+  - role: kubernetes-apps/prometheus
+    when: prometheus_enabled
+    tags: [ apps, prometheus ]
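The new role only runs when prometheus_enabled evaluates to true, so deployment stays opt-in. A sketch of enabling it from the inventory (the file path is an assumption; any group_vars scope that reaches the kube-master hosts works):

  # inventory/group_vars/k8s-cluster.yml (hypothetical location)
  prometheus_enabled: true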
roles/kubernetes-apps/prometheus/defaults/main.yml (new file, 79 lines)
@@ -0,0 +1,79 @@
---
prometheus_namespace: monitoring

prometheus_resources:
  - name: prometheus-operator
    type: deployment
  - name: kube-state-metrics
    type: deployment
  - name: kube-state-metrics
    type: service
  - name: node-exporter
    type: daemonset
  - name: node-exporter
    type: service
  - name: grafana-dashboards
    type: configmap
  - name: grafana
    type: deployment
  - name: grafana
    type: service
  - name: prometheus-k8s-rules
    type: configmap
  - name: prometheus-k8s
    type: service
  - name: alertmanager-main
    type: configmap
  - name: alertmanager-main
    type: service
  - name: k8s-apps-https
    type: servicemonitor
  - name: k8s-apps-http
    type: servicemonitor
  - name: k8s
    type: prometheus
  - name: main
    type: alertmanager

prometheus_operator_cpu_limit: 200m
prometheus_operator_mem_limit: 300M
prometheus_operator_cpu_requests: 100m
prometheus_operator_mem_requests: 50M

node_exporter_ds_cpu_limit: 200m
node_exporter_ds_mem_limit: 50Mi
node_exporter_ds_cpu_requests: 100m
node_exporter_ds_mem_requests: 30Mi

kube_state_metrics_cpu_limit: 200m
kube_state_metrics_mem_limit: 50Mi
kube_state_metrics_cpu_requests: 100m
kube_state_metrics_mem_requests: 30Mi

alertmanager_config_receivers:
  - name: webhook
    webhook_configs:
      - url: "http://alertmanagerwh:30500/"
alertmanager_config_global:
  resolve_timeout: 5m
alertmanager_config_route:
  group_by:
    - job
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 12h
  receiver: webhook

alertmanager_replicas: 3

grafana_cpu_limit: 300m
grafana_mem_limit: 300Mi
grafana_cpu_requests: 100m
grafana_mem_requests: 100Mi

grafana_watcher_cpu_limit: 100m
grafana_watcher_mem_limit: 32Mi
grafana_watcher_cpu_requests: 50m
grafana_watcher_mem_requests: 16Mi

prometheus_mem_requests: 400Mi
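Because the Alertmanager configuration is assembled from the three alertmanager_config_* variables above, the receiver and routing can be swapped from the inventory without touching the templates. A hedged sketch that keeps the webhook receiver but points it elsewhere and adds an extra grouping label (the URL is a placeholder, not part of this commit):

  alertmanager_config_receivers:
    - name: webhook
      webhook_configs:
        - url: "http://my-alert-handler.example.com:8080/"   # placeholder endpoint
  alertmanager_config_route:
    group_by:
      - job
      - alertname        # example of an additional grouping label
    group_wait: 30s
    group_interval: 5m
    repeat_interval: 12h
    receiver: webhook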
roles/kubernetes-apps/prometheus/meta/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
dependencies:
  - role: download
    file: "{{ downloads.prometheus_operator }}"
  - role: download
    file: "{{ downloads.prometheus }}"
  - role: download
    file: "{{ downloads.node_exporter }}"
  - role: download
    file: "{{ downloads.kube_state_metrics }}"
  - role: download
    file: "{{ downloads.alertmanager }}"
  - role: download
    file: "{{ downloads.grafana }}"
  - role: download
    file: "{{ downloads.grafana_watcher }}"
roles/kubernetes-apps/prometheus/tasks/main.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
---
- name: "Prometheus | Check if Prometheus namespace exists"
  command: "{{ bin_dir }}/kubectl get ns {{ prometheus_namespace }}"
  register: prom_ns_check
  changed_when: False
  failed_when: False
  run_once: true

- name: "Prometheus | Create Prometheus namespace"
  command: "{{ bin_dir }}/kubectl create namespace {{ prometheus_namespace }}"
  changed_when: False
  when: prom_ns_check|failed
  run_once: true

- name: "Prometheus | Create apps directory"
  file:
    path: "{{ kube_config_dir }}/apps"
    state: directory

- name: "Prometheus | Write Prometheus manifests"
  template:
    src: "{{ item.name }}-{{ item.type }}.yml.j2"
    dest: "{{ kube_config_dir }}/apps/{{ item.name }}-{{ item.type }}.yml"
  register: prometheus_templates
  with_items:
    - "{{ prometheus_resources }}"

- name: "Prometheus | Create Prometheus Operator deployment"
  kube:
    filename: "{{ kube_config_dir }}/apps/{{ item.item.name }}-{{ item.item.type }}.yml"
    kubectl: "{{ bin_dir }}/kubectl"
    name: "{{ item.item.name }}"
    namespace: "{{ prometheus_namespace }}"
    resource: "{{ item.item.type }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  when: item.item.name == 'prometheus-operator'
  with_items: "{{ prometheus_templates.results }}"
  run_once: true

- name: "Prometheus | Wait for TPRs to become available"
  command: "{{ bin_dir }}/kubectl get {{ item }} --namespace={{ prometheus_namespace }}"
  register: tpr_available
  until: tpr_available.rc == 0
  retries: 10
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - servicemonitor
    - prometheus
    - alertmanager

- name: "Prometheus | Create Prometheus resources"
  kube:
    filename: "{{ kube_config_dir }}/apps/{{ item.item.name }}-{{ item.item.type }}.yml"
    kubectl: "{{ bin_dir }}/kubectl"
    name: "{{ item.item.name }}"
    namespace: "{{ prometheus_namespace }}"
    resource: "{{ item.item.type }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  when: item.item.name != 'prometheus-operator'
  with_items: "{{ prometheus_templates.results }}"
  run_once: true
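Since the play hook in kubernetes-apps tags the role with apps and prometheus, the monitoring stack can be (re)deployed on its own once the cluster is up. A sketch, assuming the playbook and inventory paths of a typical checkout (adjust both to your layout):

  ansible-playbook -b -i inventory/inventory.cfg cluster.yml --tags prometheus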
@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-main
  creationTimestamp: null
data:
  alertmanager.yaml: |-
    global:
      {{ alertmanager_config_global | to_nice_yaml | indent(6, False) }}
    route:
      {{ alertmanager_config_route | to_nice_yaml | indent(6, False) }}
    receivers:
      {{ alertmanager_config_receivers | to_nice_yaml | indent(6, False) }}
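For reference, with the role defaults above this template renders an alertmanager.yaml roughly like the following (to_nice_yaml orders keys alphabetically within each section, so treat this as a sketch rather than a byte-exact rendering):

  global:
    resolve_timeout: 5m
  route:
    group_by:
    - job
    group_interval: 5m
    group_wait: 30s
    receiver: webhook
    repeat_interval: 12h
  receivers:
  - name: webhook
    webhook_configs:
    - url: http://alertmanagerwh:30500/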
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: alertmanager-main
spec:
  type: NodePort
  ports:
  - name: web
    nodePort: 30903
    port: 9093
    protocol: TCP
    targetPort: web
  selector:
    alertmanager: main
(File diff suppressed because it is too large.)
@@ -0,0 +1,58 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: grafana
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
      - name: grafana
        image: {{ grafana_image_repo }}:{{ grafana_image_tag }}
        env:
        - name: GF_AUTH_BASIC_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        volumeMounts:
        - name: grafana-storage
          mountPath: /var/grafana-storage
        ports:
        - name: web
          containerPort: 3000
        resources:
          requests:
            memory: {{ grafana_mem_requests }}
            cpu: {{ grafana_cpu_requests }}
          limits:
            memory: {{ grafana_mem_limit }}
            cpu: {{ grafana_cpu_limit }}
      - name: grafana-watcher
        image: {{ grafana_watcher_image_repo }}:{{ grafana_watcher_image_tag }}
        args:
        - '--watch-dir=/var/grafana-dashboards'
        - '--grafana-url=http://admin:admin@localhost:3000'
        resources:
          requests:
            memory: {{ grafana_watcher_mem_requests }}
            cpu: {{ grafana_watcher_cpu_requests }}
          limits:
            memory: {{ grafana_watcher_mem_limit }}
            cpu: {{ grafana_watcher_cpu_limit }}
        volumeMounts:
        - name: grafana-dashboards
          mountPath: /var/grafana-dashboards
      volumes:
      - name: grafana-storage
        emptyDir: {}
      - name: grafana-dashboards
        configMap:
          name: grafana-dashboards
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: grafana
  labels:
    app: grafana
spec:
  type: NodePort
  ports:
  - name: web
    port: 3000
    protocol: TCP
    nodePort: 30902
  selector:
    app: grafana
@@ -0,0 +1,23 @@
---
apiVersion: monitoring.coreos.com/v1alpha1
kind: ServiceMonitor
metadata:
  name: k8s-apps-http
  labels:
    k8s-apps: http
spec:
  jobLabel: k8s-app
  selector:
    matchExpressions:
    - {key: k8s-app, operator: Exists}
  namespaceSelector:
    matchNames:
    - kube-system
    - monitoring
  endpoints:
  - port: http-metrics
    interval: 15s
  - port: http-metrics-dnsmasq
    interval: 15s
  - port: http-metrics-skydns
    interval: 15s
@@ -0,0 +1,22 @@
apiVersion: monitoring.coreos.com/v1alpha1
kind: ServiceMonitor
metadata:
  name: k8s-apps-https
  labels:
    k8s-apps: https
spec:
  jobLabel: k8s-app
  selector:
    matchExpressions:
    - {key: k8s-app, operator: Exists}
  namespaceSelector:
    matchNames:
    - kube-system
  endpoints:
  - port: https-metrics
    interval: 15s
    scheme: https
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
@@ -0,0 +1,25 @@
apiVersion: monitoring.coreos.com/v1alpha1
kind: Prometheus
metadata:
  name: k8s
  labels:
    prometheus: k8s
spec:
  version: "{{ prometheus_image_tag }}"
  baseImage: "{{ prometheus_image_repo }}"
  serviceMonitorSelector:
    matchExpressions:
    - {key: k8s-apps, operator: Exists}
  resources:
    requests:
      # 2Gi is default, but won't schedule if you don't have a node with >2Gi
      # memory. Modify based on your target and time-series count for
      # production use. This value is mainly meant for demonstration/testing
      # purposes.
      memory: {{ prometheus_mem_requests }}
  alerting:
    alertmanagers:
    - namespace: monitoring
      name: alertmanager-main
      port: web
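Once the operator has registered its ThirdPartyResources and the role has applied the manifests above, the resulting objects can be inspected with plain kubectl, mirroring the wait loop in tasks/main.yml; a quick sketch:

  kubectl --namespace=monitoring get prometheus,servicemonitor,alertmanager
  kubectl --namespace=monitoring get pods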
@@ -0,0 +1,24 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-state-metrics
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: kube-state-metrics
    spec:
      containers:
      - name: kube-state-metrics
        image: "{{ kube_state_metrics_image_repo }}:{{ kube_state_metrics_image_tag }}"
        ports:
        - name: metrics
          containerPort: 8080
        resources:
          requests:
            memory: {{ kube_state_metrics_mem_requests }}
            cpu: {{ kube_state_metrics_cpu_requests }}
          limits:
            memory: {{ kube_state_metrics_mem_limit }}
            cpu: {{ kube_state_metrics_cpu_limit }}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kube-state-metrics
    k8s-app: kube-state-metrics
  name: kube-state-metrics
spec:
  ports:
  - name: http-metrics
    port: 8080
    targetPort: metrics
    protocol: TCP
  selector:
    app: kube-state-metrics
@@ -0,0 +1,10 @@
apiVersion: "monitoring.coreos.com/v1alpha1"
kind: "Alertmanager"
metadata:
  name: "main"
  labels:
    alertmanager: "main"
spec:
  replicas: {{ alertmanager_replicas }}
  version: {{ alertmanager_image_tag }}
  baseImage: {{ alertmanager_image_repo }}
@@ -0,0 +1,44 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: node-exporter
spec:
  template:
    metadata:
      labels:
        app: node-exporter
      name: node-exporter
    spec:
      hostNetwork: true
      hostPID: true
      containers:
      - image: "{{ node_exporter_image_repo }}:{{ node_exporter_image_tag }}"
        args:
        - "-collector.procfs=/host/proc"
        - "-collector.sysfs=/host/sys"
        name: node-exporter
        ports:
        - containerPort: 9100
          hostPort: 9100
          name: scrape
        resources:
          requests:
            memory: {{ node_exporter_ds_mem_requests }}
            cpu: {{ node_exporter_ds_cpu_requests }}
          limits:
            memory: {{ node_exporter_ds_mem_limit }}
            cpu: {{ node_exporter_ds_cpu_limit }}
        volumeMounts:
        - name: proc
          readOnly: true
          mountPath: /host/proc
        - name: sys
          readOnly: true
          mountPath: /host/sys
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: sys
        hostPath:
          path: /sys
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: node-exporter
    k8s-app: node-exporter
  name: node-exporter
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics
    port: 9100
    protocol: TCP
  selector:
    app: node-exporter
@@ -0,0 +1,449 @@
#jinja2:variable_start_string:'[[' , variable_end_string:']]', block_start_string:'[%' , block_end_string:'%]'
---
apiVersion: v1
data:
  etcd2.rules: |
    ### General cluster availability ###

    # alert if another failed peer will result in an unavailable cluster
    ALERT InsufficientPeers
      IF count(up{job="etcd-k8s"} == 0) > (count(up{job="etcd-k8s"}) / 2 - 1)
      FOR 3m
      LABELS {
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "Etcd cluster small",
        description = "If one more etcd peer goes down the cluster will be unavailable",
      }

    ### HTTP requests alerts ###

    # alert if more than 1% of requests to an HTTP endpoint have failed with a non 4xx response
    ALERT HighNumberOfFailedHTTPRequests
      IF sum by(method) (rate(etcd_http_failed_total{job="etcd-k8s", code!~"4[0-9]{2}"}[5m]))
        / sum by(method) (rate(etcd_http_received_total{job="etcd-k8s"}[5m])) > 0.01
      FOR 10m
      LABELS {
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "a high number of HTTP requests are failing",
        description = "{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}",
      }

    # alert if more than 5% of requests to an HTTP endpoint have failed with a non 4xx response
    ALERT HighNumberOfFailedHTTPRequests
      IF sum by(method) (rate(etcd_http_failed_total{job="etcd-k8s", code!~"4[0-9]{2}"}[5m]))
        / sum by(method) (rate(etcd_http_received_total{job="etcd-k8s"}[5m])) > 0.05
      FOR 5m
      LABELS {
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "a high number of HTTP requests are failing",
        description = "{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}",
      }

    # alert if 50% of requests get a 4xx response
    ALERT HighNumberOfFailedHTTPRequests
      IF sum by(method) (rate(etcd_http_failed_total{job="etcd-k8s", code=~"4[0-9]{2}"}[5m]))
        / sum by(method) (rate(etcd_http_received_total{job="etcd-k8s"}[5m])) > 0.5
      FOR 10m
      LABELS {
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "a high number of HTTP requests are failing",
        description = "{{ $value }}% of requests for {{ $labels.method }} failed with 4xx responses on etcd instance {{ $labels.instance }}",
      }

    # alert if the 99th percentile of HTTP requests take more than 150ms
    ALERT HTTPRequestsSlow
      IF histogram_quantile(0.99, rate(etcd_http_successful_duration_second_bucket[5m])) > 0.15
      FOR 10m
      LABELS {
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "slow HTTP requests",
        description = "on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method }} are slow",
      }

    ### File descriptor alerts ###

    instance:fd_utilization = process_open_fds / process_max_fds

    # alert if file descriptors are likely to exhaust within the next 4 hours
    ALERT FdExhaustionClose
      IF predict_linear(instance:fd_utilization[1h], 3600 * 4) > 1
      FOR 10m
      LABELS {
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "file descriptors soon exhausted",
        description = "{{ $labels.job }} instance {{ $labels.instance }} will exhaust in file descriptors soon",
      }

    # alert if file descriptors are likely to exhaust within the next hour
    ALERT FdExhaustionClose
      IF predict_linear(instance:fd_utilization[10m], 3600) > 1
      FOR 10m
      LABELS {
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "file descriptors soon exhausted",
        description = "{{ $labels.job }} instance {{ $labels.instance }} will exhaust in file descriptors soon",
      }

    ### etcd proposal alerts ###

    # alert if there are several failed proposals within an hour
    ALERT HighNumberOfFailedProposals
      IF increase(etcd_server_proposal_failed_total{job="etcd"}[1h]) > 5
      LABELS {
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "a high number of failed proposals within the etcd cluster are happening",
        description = "etcd instance {{ $labels.instance }} has seen {{ $value }} proposal failures within the last hour",
      }

    ### etcd disk io latency alerts ###

    # alert if 99th percentile of fsync durations is higher than 500ms
    ALERT HighFsyncDurations
      IF histogram_quantile(0.99, rate(etcd_wal_fsync_durations_seconds_bucket[5m])) > 0.5
      FOR 10m
      LABELS {
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "high fsync durations",
        description = "etcd instance {{ $labels.instance }} fsync durations are high",
      }
  kubernetes.rules: |+
    # NOTE: These rules were kindly contributed by the SoundCloud engineering team.

    ### Container resources ###

    cluster_namespace_controller_pod_container:spec_memory_limit_bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_spec_memory_limit_bytes{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:spec_cpu_shares =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_spec_cpu_shares{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:cpu_usage:rate =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          irate(
            container_cpu_usage_seconds_total{container_name!=""}[5m]
          ),
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_usage:bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_memory_usage_bytes{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_working_set:bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_memory_working_set_bytes{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_rss:bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_memory_rss{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_cache:bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_memory_cache{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:disk_usage:bytes =
      sum by (cluster,namespace,controller,pod_name,container_name) (
        label_replace(
          container_disk_usage_bytes{container_name!=""},
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_pagefaults:rate =
      sum by (cluster,namespace,controller,pod_name,container_name,scope,type) (
        label_replace(
          irate(
            container_memory_failures_total{container_name!=""}[5m]
          ),
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    cluster_namespace_controller_pod_container:memory_oom:rate =
      sum by (cluster,namespace,controller,pod_name,container_name,scope,type) (
        label_replace(
          irate(
            container_memory_failcnt{container_name!=""}[5m]
          ),
          "controller", "$1",
          "pod_name", "^(.*)-[a-z0-9]+"
        )
      )

    ### Cluster resources ###

    cluster:memory_allocation:percent =
      100 * sum by (cluster) (
        container_spec_memory_limit_bytes{pod_name!=""}
      ) / sum by (cluster) (
        machine_memory_bytes
      )

    cluster:memory_used:percent =
      100 * sum by (cluster) (
        container_memory_usage_bytes{pod_name!=""}
      ) / sum by (cluster) (
        machine_memory_bytes
      )

    cluster:cpu_allocation:percent =
      100 * sum by (cluster) (
        container_spec_cpu_shares{pod_name!=""}
      ) / sum by (cluster) (
        container_spec_cpu_shares{id="/"} * on(cluster,instance) machine_cpu_cores
      )

    cluster:node_cpu_use:percent =
      100 * sum by (cluster) (
        rate(node_cpu{mode!="idle"}[5m])
      ) / sum by (cluster) (
        machine_cpu_cores
      )

    ### API latency ###

    # Raw metrics are in microseconds. Convert to seconds.
    cluster_resource_verb:apiserver_latency:quantile_seconds{quantile="0.99"} =
      histogram_quantile(
        0.99,
        sum by(le,cluster,job,resource,verb) (apiserver_request_latencies_bucket)
      ) / 1e6
    cluster_resource_verb:apiserver_latency:quantile_seconds{quantile="0.9"} =
      histogram_quantile(
        0.9,
        sum by(le,cluster,job,resource,verb) (apiserver_request_latencies_bucket)
      ) / 1e6
    cluster_resource_verb:apiserver_latency:quantile_seconds{quantile="0.5"} =
      histogram_quantile(
        0.5,
        sum by(le,cluster,job,resource,verb) (apiserver_request_latencies_bucket)
      ) / 1e6

    ### Scheduling latency ###

    cluster:scheduler_e2e_scheduling_latency:quantile_seconds{quantile="0.99"} =
      histogram_quantile(0.99,sum by (le,cluster) (scheduler_e2e_scheduling_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_e2e_scheduling_latency:quantile_seconds{quantile="0.9"} =
      histogram_quantile(0.9,sum by (le,cluster) (scheduler_e2e_scheduling_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_e2e_scheduling_latency:quantile_seconds{quantile="0.5"} =
      histogram_quantile(0.5,sum by (le,cluster) (scheduler_e2e_scheduling_latency_microseconds_bucket)) / 1e6

    cluster:scheduler_scheduling_algorithm_latency:quantile_seconds{quantile="0.99"} =
      histogram_quantile(0.99,sum by (le,cluster) (scheduler_scheduling_algorithm_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_scheduling_algorithm_latency:quantile_seconds{quantile="0.9"} =
      histogram_quantile(0.9,sum by (le,cluster) (scheduler_scheduling_algorithm_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_scheduling_algorithm_latency:quantile_seconds{quantile="0.5"} =
      histogram_quantile(0.5,sum by (le,cluster) (scheduler_scheduling_algorithm_latency_microseconds_bucket)) / 1e6

    cluster:scheduler_binding_latency:quantile_seconds{quantile="0.99"} =
      histogram_quantile(0.99,sum by (le,cluster) (scheduler_binding_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_binding_latency:quantile_seconds{quantile="0.9"} =
      histogram_quantile(0.9,sum by (le,cluster) (scheduler_binding_latency_microseconds_bucket)) / 1e6
    cluster:scheduler_binding_latency:quantile_seconds{quantile="0.5"} =
      histogram_quantile(0.5,sum by (le,cluster) (scheduler_binding_latency_microseconds_bucket)) / 1e6

    ALERT K8SNodeDown
      IF up{job="kubelet"} == 0
      FOR 1h
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "Kubelet cannot be scraped",
        description = "Prometheus could not scrape a {{ $labels.job }} for more than one hour",
      }

    ALERT K8SNodeNotReady
      IF kube_node_status_ready{condition="true"} == 0
      FOR 1h
      LABELS {
        service = "k8s",
        severity = "warning",
      }
      ANNOTATIONS {
        summary = "Node status is NotReady",
        description = "The Kubelet on {{ $labels.node }} has not checked in with the API, or has set itself to NotReady, for more than an hour",
      }

    ALERT K8SManyNodesNotReady
      IF
        count by (cluster) (kube_node_status_ready{condition="true"} == 0) > 1
        AND
          (
            count by (cluster) (kube_node_status_ready{condition="true"} == 0)
          /
            count by (cluster) (kube_node_status_ready{condition="true"})
          ) > 0.2
      FOR 1m
      LABELS {
        service = "k8s",
        severity = "critical",
      }
      ANNOTATIONS {
        summary = "Many K8s nodes are Not Ready",
        description = "{{ $value }} K8s nodes (more than 10% of cluster {{ $labels.cluster }}) are in the NotReady state.",
      }

    ALERT K8SKubeletNodeExporterDown
      IF up{job="node-exporter"} == 0
      FOR 15m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "Kubelet node_exporter cannot be scraped",
        description = "Prometheus could not scrape a {{ $labels.job }} for more than one hour.",
      }

    ALERT K8SKubeletDown
      IF absent(up{job="kubelet"}) or count by (cluster) (up{job="kubelet"} == 0) / count by (cluster) (up{job="kubelet"}) > 0.1
      FOR 1h
      LABELS {
        service = "k8s",
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "Many Kubelets cannot be scraped",
        description = "Prometheus failed to scrape more than 10% of kubelets, or all Kubelets have disappeared from service discovery.",
      }

    ALERT K8SApiserverDown
      IF up{job="kubernetes"} == 0
      FOR 15m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "API server unreachable",
        description = "An API server could not be scraped.",
      }

    # Disable for non HA kubernetes setups.
    ALERT K8SApiserverDown
      IF absent({job="kubernetes"}) or (count by(cluster) (up{job="kubernetes"} == 1) < count by(cluster) (up{job="kubernetes"}))
      FOR 5m
      LABELS {
        service = "k8s",
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "API server unreachable",
        description = "Prometheus failed to scrape multiple API servers, or all API servers have disappeared from service discovery.",
      }

    ALERT K8SSchedulerDown
      IF absent(up{job="kube-scheduler"}) or (count by(cluster) (up{job="kube-scheduler"} == 1) == 0)
      FOR 5m
      LABELS {
        service = "k8s",
        severity = "critical",
      }
      ANNOTATIONS {
        summary = "Scheduler is down",
        description = "There is no running K8S scheduler. New pods are not being assigned to nodes.",
      }

    ALERT K8SControllerManagerDown
      IF absent(up{job="kube-controller-manager"}) or (count by(cluster) (up{job="kube-controller-manager"} == 1) == 0)
      FOR 5m
      LABELS {
        service = "k8s",
        severity = "critical",
      }
      ANNOTATIONS {
        summary = "Controller manager is down",
        description = "There is no running K8S controller manager. Deployments and replication controllers are not making progress.",
      }

    ALERT K8SConntrackTableFull
      IF 100*node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 50
      FOR 10m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "Number of tracked connections is near the limit",
        description = "The nf_conntrack table is {{ $value }}% full.",
      }

    ALERT K8SConntrackTableFull
      IF 100*node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 90
      LABELS {
        service = "k8s",
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "Number of tracked connections is near the limit",
        description = "The nf_conntrack table is {{ $value }}% full.",
      }

    # To catch the conntrack sysctl de-tuning when it happens
    ALERT K8SConntrackTuningMissing
      IF node_nf_conntrack_udp_timeout > 10
      FOR 10m
      LABELS {
        service = "k8s",
        severity = "warning",
      }
      ANNOTATIONS {
        summary = "Node does not have the correct conntrack tunings",
        description = "Nodes keep un-setting the correct tunings, investigate when it happens.",
      }

    ALERT K8STooManyOpenFiles
      IF 100*process_open_fds{job=~"kubelet|kubernetes"} / process_max_fds > 50
      FOR 10m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "{{ $labels.job }} has too many open file descriptors",
        description = "{{ $labels.node }} is using {{ $value }}% of the available file/socket descriptors.",
      }

    ALERT K8STooManyOpenFiles
      IF 100*process_open_fds{job=~"kubelet|kubernetes"} / process_max_fds > 80
      FOR 10m
      LABELS {
        service = "k8s",
        severity = "critical"
      }
      ANNOTATIONS {
        summary = "{{ $labels.job }} has too many open file descriptors",
        description = "{{ $labels.node }} is using {{ $value }}% of the available file/socket descriptors.",
      }

    # Some verbs excluded because they are expected to be long-lasting:
    # WATCHLIST is long-poll, CONNECT is `kubectl exec`.
    ALERT K8SApiServerLatency
      IF histogram_quantile(
          0.99,
          sum without (instance,node,resource) (apiserver_request_latencies_bucket{verb!~"CONNECT|WATCHLIST|WATCH"})
        ) / 1e6 > 1.0
      FOR 10m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "Kubernetes apiserver latency is high",
        description = "99th percentile Latency for {{ $labels.verb }} requests to the kube-apiserver is higher than 1s.",
      }

    ALERT K8SApiServerEtcdAccessLatency
      IF etcd_request_latencies_summary{quantile="0.99"} / 1e6 > 1.0
      FOR 15m
      LABELS {
        service = "k8s",
        severity = "warning"
      }
      ANNOTATIONS {
        summary = "Access to etcd is slow",
        description = "99th percentile latency for apiserver to access etcd is higher than 1s.",
      }

    ALERT K8SKubeletTooManyPods
      IF kubelet_running_pod_count > 100
      LABELS {
        service = "k8s",
        severity = "warning",
      }
      ANNOTATIONS {
        summary = "Kubelet is close to pod limit",
        description = "Kubelet {{$labels.instance}} is running {{$value}} pods, close to the limit of 110",
      }

kind: ConfigMap
metadata:
  creationTimestamp: null
  name: prometheus-k8s-rules
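The #jinja2: header on the first line of this template switches Ansible's delimiters from {{ }} / {% %} to [[ ]] / [% %] for this file only, so the Prometheus template expressions in the alert annotations above (for example {{ $labels.instance }} and {{ $value }}) reach the ConfigMap untouched. The practical implication, sketched with a hypothetical variable for illustration, is that any Ansible substitution inside this one file would have to use the alternate delimiters:

  # inside the prometheus-k8s-rules template only (hypothetical example):
  cluster = "[[ cluster_name | default('kargo') ]]"   # expanded by Ansible via the [[ ]] delimiters
  description = "{{ $value }}% of requests failed"    # left literal for Prometheus to expand at alert time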
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: prometheus-k8s
spec:
  type: NodePort
  ports:
  - name: web
    nodePort: 30900
    port: 9090
    protocol: TCP
    targetPort: web
  selector:
    prometheus: k8s
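The three UIs added by this commit are exposed on fixed NodePorts (Prometheus 30900 above, Grafana 30902, Alertmanager 30903), so once the pods are running they should be reachable on any node; a quick smoke test, with the node address left as a placeholder:

  curl http://<node-ip>:30900/graph   # Prometheus UI
  curl http://<node-ip>:30902/        # Grafana
  curl http://<node-ip>:30903/        # Alertmanager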
@@ -0,0 +1,23 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: prometheus-operator
  labels:
    operator: prometheus
spec:
  replicas: 1
  template:
    metadata:
      labels:
        operator: prometheus
    spec:
      containers:
      - name: prometheus-operator
        image: "{{ prometheus_operator_image_repo }}:{{ prometheus_operator_image_tag }}"
        resources:
          requests:
            cpu: {{ prometheus_operator_cpu_requests }}
            memory: {{ prometheus_operator_mem_requests }}
          limits:
            cpu: {{ prometheus_operator_cpu_limit }}
            memory: {{ prometheus_operator_mem_limit }}