Apply RBAC to efk and create fluentd.conf
Ship fluentd.conf as a ConfigMap so the Fluentd configuration can be changed without rebuilding the image. Change the Elasticsearch ReplicationController to a Deployment; because the previous Elasticsearch was installed as an RC, that RC has to be deleted first.
parent 460b5824c3
commit b22bef5cfb
9 changed files with 412 additions and 42 deletions
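Note on upgrading an existing cluster: kubectl apply of the new Deployment does not remove the old elasticsearch-logging-v1 ReplicationController, so it has to be deleted by hand first (as the commit message says). A minimal sketch of a one-off cleanup task, assuming the RC name and namespace used in the manifests below:

- name: "ElasticSearch | Delete legacy elasticsearch RC (one-off, before this change)"
  # Assumption: the old RC is named elasticsearch-logging-v1 in {{ system_namespace }}, as in the previous manifest
  command: "{{ bin_dir }}/kubectl delete rc elasticsearch-logging-v1 -n {{ system_namespace }} --ignore-not-found"
  run_once: true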
ElasticSearch tasks:
@@ -1,4 +1,22 @@
 ---
+- name: "ElasticSearch | Write efk manifests (RBAC)"
+  template:
+    src: "{{ item }}"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+  with_items:
+    - "efk-sa.yml"
+    - "efk-clusterrolebinding.yml"
+  run_once: true
+  when: rbac_enabled
+
+- name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
+  command: "kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  with_items:
+    - "efk-sa.yml"
+    - "efk-clusterrolebinding.yml"
+  run_once: true
+  when: rbac_enabled
+
 - name: "ElasticSearch | Write ES deployment"
   template:
     src: elasticsearch-deployment.yml.j2
@@ -6,15 +24,9 @@
   register: es_deployment_manifest

 - name: "ElasticSearch | Create ES deployment"
-  kube:
-    filename: "{{kube_config_dir}}/elasticsearch-deployment.yaml"
-    kubectl: "{{bin_dir}}/kubectl"
-    name: "elasticsearch-logging-v1"
-    namespace: "{{system_namespace}}"
-    resource: "rc"
-    state: "{{ item | ternary('latest','present') }}"
-  with_items: "{{ es_deployment_manifest.changed }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
   run_once: true
+  when: es_deployment_manifest.changed
@@ -23,12 +35,7 @@
   register: es_service_manifest

 - name: "ElasticSearch | Create ES service"
-  kube:
-    filename: "{{kube_config_dir}}/elasticsearch-service.yaml"
-    kubectl: "{{bin_dir}}/kubectl"
-    name: "elasticsearch-logging"
-    namespace: "{{system_namespace}}"
-    resource: "svc"
-    state: "{{ item | ternary('latest','present') }}"
-  with_items: "{{ es_service_manifest.changed }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
   run_once: true
+  when: es_service_manifest.changed
efk-clusterrolebinding.yml (new file):
@@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: efk
  namespace: {{ system_namespace }}
subjects:
  - kind: ServiceAccount
    name: efk
    namespace: {{ system_namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
efk-sa.yml (new file):
@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: efk
  namespace: {{ system_namespace }}
  labels:
    kubernetes.io/cluster-service: "true"
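A quick way to confirm both RBAC objects landed, sketched as ad-hoc check tasks in the same style as the role (the ClusterRoleBinding is cluster-scoped, so no namespace flag there):

- name: "EFK | Verify efk ServiceAccount exists (ad-hoc check)"
  command: "{{ bin_dir }}/kubectl get serviceaccount efk -n {{ system_namespace }}"
  changed_when: false
  run_once: true

- name: "EFK | Verify efk ClusterRoleBinding exists (ad-hoc check)"
  command: "{{ bin_dir }}/kubectl get clusterrolebinding efk"
  changed_when: false
  run_once: true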
elasticsearch-deployment.yml.j2:
@@ -1,7 +1,7 @@
 ---
 # https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: v1
-kind: ReplicationController
+apiVersion: extensions/v1beta1
+kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
   namespace: "{{ system_namespace }}"
@@ -12,6 +12,7 @@ metadata:
 spec:
   replicas: 2
   selector:
+    matchLabels:
       k8s-app: elasticsearch-logging
       version: "{{ elasticsearch_image_tag }}"
   template:
@@ -49,3 +50,7 @@ spec:
       volumes:
       - name: es-persistent-storage
         emptyDir: {}
+{% if rbac_enabled %}
+      serviceAccountName: efk
+{% endif %}
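Once the legacy RC is gone and the Deployment is applied, a hedged way to watch the rollout from the first master, again written as an ad-hoc task (the Deployment name comes from the manifest above):

- name: "ElasticSearch | Wait for the elasticsearch-logging-v1 Deployment rollout (ad-hoc check)"
  command: "{{ bin_dir }}/kubectl rollout status deployment/elasticsearch-logging-v1 -n {{ system_namespace }}"
  changed_when: false
  run_once: true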
Fluentd defaults:
@@ -3,3 +3,5 @@ fluentd_cpu_limit: 0m
 fluentd_mem_limit: 200Mi
 fluentd_cpu_requests: 100m
 fluentd_mem_requests: 200Mi
+fluentd_config_dir: /etc/kubernetes/fluentd
+fluentd_config_file: fluentd.conf
Fluentd tasks:
@@ -1,28 +1,23 @@
 ---
+- name: "Fluentd | copy config file"
+  template:
+    src: fluentd-config.yml.j2
+    dest: "{{ kube_config_dir }}/fluentd-config.yaml"
+  register: fluentd_config
+
+- name: "Fluentd | create configMap"
+  command: "{{bin_dir}}/kubectl apply -f {{ kube_config_dir }}/fluentd-config.yaml"
+  run_once: true
+  when: fluentd_config.changed
+
 - name: "Fluentd | Write fluentd daemonset"
   template:
     src: fluentd-ds.yml.j2
     dest: "{{ kube_config_dir }}/fluentd-ds.yaml"
   register: fluentd_ds_manifest

-#FIXME: remove if kubernetes/features#124 is implemented
-- name: "Fluentd | Purge old fluentd daemonset"
-  kube:
-    filename: "{{kube_config_dir}}/fluentd-ds.yaml"
-    kubectl: "{{bin_dir}}/kubectl"
-    name: "fluentd-es-v{{ fluentd_version }}"
-    namespace: "{{system_namespace}}"
-    resource: "ds"
-    state: absent
-  when: inventory_hostname == groups['kube-master'][0] and fluentd_ds_manifest.changed
-
 - name: "Fluentd | Create fluentd daemonset"
-  kube:
-    filename: "{{kube_config_dir}}/fluentd-ds.yaml"
-    kubectl: "{{bin_dir}}/kubectl"
-    name: "fluentd-es-v{{ fluentd_version }}"
-    namespace: "{{system_namespace}}"
-    resource: "ds"
-    state: "{{ item | ternary('latest','present') }}"
-  with_items: "{{ fluentd_ds_manifest.changed }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
   run_once: true
+  when: fluentd_ds_manifest.changed
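A minimal sketch of checking that the rendered ConfigMap (defined in fluentd-config.yml.j2 below) actually exists in the cluster, assuming kubectl access from the host the tasks ran on:

- name: "Fluentd | Show the rendered fluentd-config ConfigMap (ad-hoc check)"
  command: "{{ bin_dir }}/kubectl get configmap fluentd-config -n {{ system_namespace }} -o yaml"
  changed_when: false
  run_once: true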
fluentd-config.yml.j2 (new file):
@@ -0,0 +1,328 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fluentd-config
  namespace: "{{ system_namespace }}"
data:
  {{ fluentd_config_file }}: |
    # This configuration file for Fluentd / td-agent is used
    # to watch changes to Docker log files. The kubelet creates symlinks that
    # capture the pod name, namespace, container name & Docker container ID
    # to the docker logs for pods in the /var/log/containers directory on the host.
    # If running this fluentd configuration in a Docker container, the /var/log
    # directory should be mounted in the container.
    #
    # These logs are then submitted to Elasticsearch which assumes the
    # installation of the fluent-plugin-elasticsearch & the
    # fluent-plugin-kubernetes_metadata_filter plugins.
    # See https://github.com/uken/fluent-plugin-elasticsearch &
    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
    # more information about the plugins.
    # Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #  "stream":"stderr",
    #  "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The time_format specification below makes sure we properly
    # parse the time format produced by Docker. This will be
    # submitted to Elasticsearch and should appear like:
    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
    # ...
    # {
    #   "_index" : "logstash-2014.09.25",
    #   "_type" : "fluentd",
    #   "_id" : "VBrbor2QTuGpsQyTCdfzqA",
    #   "_score" : 1.0,
    #   "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
    #              "stream":"stderr","tag":"docker.container.all",
    #              "@timestamp":"2014-09-25T22:45:50+00:00"}
    # },
    # ...
    #
    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
    # record & add labels to the log record if properly configured. This enables users
    # to filter & search logs on any metadata.
    # For example a Docker container's logs might be in the directory:
    #
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    #
    # and in the file:
    #
    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory which includes the pod name and the Kubernetes
    # container name:
    #
    #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #   ->
    #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #
    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # This results in the tag:
    #
    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
    # which are added to the log message as a kubernetes field object & the Docker container ID
    # is also added under the docker field object.
    # The final tag is:
    #
    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # And the final log record look like:
    #
    # {
    #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #   "stream":"stderr",
    #   "time":"2014-09-25T21:15:03.499185026Z",
    #   "kubernetes": {
    #     "namespace": "default",
    #     "pod_name": "synthetic-logger-0.25lps-pod",
    #     "container_name": "synth-lgr"
    #   },
    #   "docker": {
    #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
    #   }
    # }
    #
    # This makes it easier for users to search for logs by pod name or by
    # the name of the Kubernetes container regardless of how many times the
    # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
    #
    # TODO: Propagate the labels associated with a container along with its logs
    # so users can query logs using labels as well as or instead of the pod name
    # and container name. This is simply done via configuration of the Kubernetes
    # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
    # problem yet to be solved as secrets are not usable in static pods which the fluentd
    # pod must be until a per-node controller is available in Kubernetes.

    # Prevent fluentd from handling records containing its own logs. Otherwise
    # it can lead to an infinite loop, when error in sending one message generates
    # another message which also fails to be sent and so on.
    <match fluent.**>
      type null
    </match>

    # Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    <source>
      type tail
      path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag kubernetes.*
      format json
      read_from_head true
    </source>

    # Example:
    # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
    <source>
      type tail
      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
      time_format %Y-%m-%d %H:%M:%S
      path /var/log/salt/minion
      pos_file /var/log/es-salt.pos
      tag salt
    </source>

    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
      type tail
      format syslog
      path /var/log/startupscript.log
      pos_file /var/log/es-startupscript.log.pos
      tag startupscript
    </source>

    # Examples:
    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
    <source>
      type tail
      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
      path /var/log/docker.log
      pos_file /var/log/es-docker.log.pos
      tag docker
    </source>

    # Example:
    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
    <source>
      type tail
      # Not parsing this, because it doesn't have anything particularly useful to
      # parse out of it (like severities).
      format none
      path /var/log/etcd.log
      pos_file /var/log/es-etcd.log.pos
      tag etcd
    </source>

    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.

    # Example:
    # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/es-kubelet.log.pos
      tag kubelet
    </source>

    # Example:
    # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/es-kube-proxy.log.pos
      tag kube-proxy
    </source>

    # Example:
    # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/es-kube-apiserver.log.pos
      tag kube-apiserver
    </source>

    # Example:
    # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/es-kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>

    # Example:
    # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/es-kube-scheduler.log.pos
      tag kube-scheduler
    </source>

    # Example:
    # I1104 10:36:20.242766       5 rescheduler.go:73] Running Rescheduler
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/rescheduler.log
      pos_file /var/log/es-rescheduler.log.pos
      tag rescheduler
    </source>

    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/es-glbc.log.pos
      tag glbc
    </source>

    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/es-cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>

    <filter kubernetes.**>
      type kubernetes_metadata
    </filter>

    ## Prometheus Exporter Plugin
    ## input plugin that exports metrics
    #<source>
    #  type prometheus
    #</source>

    #<source>
    #  type monitor_agent
    #</source>

    #<source>
    #  type forward
    #</source>

    ## input plugin that collects metrics from MonitorAgent
    #<source>
    #  @type prometheus_monitor
    #  <labels>
    #    host ${hostname}
    #  </labels>
    #</source>

    ## input plugin that collects metrics for output plugin
    #<source>
    #  @type prometheus_output_monitor
    #  <labels>
    #    host ${hostname}
    #  </labels>
    #</source>

    ## input plugin that collects metrics for in_tail plugin
    #<source>
    #  @type prometheus_tail_monitor
    #  <labels>
    #    host ${hostname}
    #  </labels>
    #</source>

    <match **>
      type elasticsearch
      user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
      password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
      log_level info
      include_tag_key true
      host elasticsearch-logging
      port 9200
      logstash_format true
      # Set the chunk limit the same as for fluentd-gcp.
      buffer_chunk_limit 2M
      # Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
      buffer_queue_limit 32
      flush_interval 5s
      # Never wait longer than 5 minutes between retries.
      max_retry_wait 30
      # Disable the limit on the number of retries (retry forever).
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 8
    </match>
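As an illustration of the multiline glog parsing above, the format1 regex would split the kubelet example line into roughly these named captures (a sketch of the expected result, not output from a real run):

# Parsed fields for: I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
severity: "I"
time: "0204 07:32:30.020537"
pid: "3368"
source: "server.go:1048"
message: "POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]"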
fluentd-ds.yml.j2:
@@ -23,7 +23,7 @@ spec:
         command:
           - '/bin/sh'
           - '-c'
-          - '/usr/sbin/td-agent 2>&1 >> /var/log/fluentd.log'
+          - '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
         resources:
           limits:
 {% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -39,6 +39,8 @@ spec:
         - name: varlibdockercontainers
           mountPath: /var/lib/docker/containers
           readOnly: true
+        - name: config
+          mountPath: "{{ fluentd_config_dir }}"
       terminationGracePeriodSeconds: 30
       volumes:
       - name: varlog
@@ -47,3 +49,10 @@ spec:
       - name: varlibdockercontainers
         hostPath:
           path: /var/lib/docker/containers
+      - name: config
+        configMap:
+          name: fluentd-config
+{% if rbac_enabled %}
+      serviceAccountName: efk
+{% endif %}
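To confirm the DaemonSet pods actually see the mounted file, one option is an ad-hoc exec against a running fluentd pod; the pod name below is a placeholder you would have to look up first, the rest follows the variables introduced in this commit:

- name: "Fluentd | Print the mounted fluentd.conf from a pod (ad-hoc check)"
  # <fluentd-pod-name> is a placeholder, not a real resource name
  command: >-
    {{ bin_dir }}/kubectl -n {{ system_namespace }} exec <fluentd-pod-name>
    -- cat {{ fluentd_config_dir }}/{{ fluentd_config_file }}
  changed_when: false
  run_once: true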
Kibana deployment template:
@@ -44,3 +44,7 @@ spec:
         - containerPort: 5601
           name: ui
           protocol: TCP
+{% if rbac_enabled %}
+      serviceAccountName: efk
+{% endif %}