On a k8s cluster, logging is delivered as a complete system built from several components.
1. Viewing system Events
kubectl describe pod <pod_name> --namespace=<namespace>
This command shows the Pod's configuration as defined at creation, its current status, and the most recent Events; the Event information is useful for troubleshooting. For example, when a Pod is stuck in Pending, the Events usually reveal the cause, which is typically one of the following:
1. No Node is available to schedule onto.
2. Resource quotas are enabled and the Pod's target node happens to have no free resources.
3. The image is still being pulled (the pull is taking too long) or the image pull failed.
kubectl describe can also be used on other k8s objects: Node, RC, Service, Namespace, Secret.
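To scan recent Events across a whole namespace rather than a single object, a couple of commonly used commands (a sketch; names in angle brackets are placeholders):
kubectl get events --namespace=<namespace> --sort-by='.metadata.creationTimestamp'   # recent events, oldest first
kubectl describe node <node_name>                                                    # check node conditions and allocatable resources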
2. Viewing container logs
kubectl logs <pod_name>
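A few commonly used variations (a sketch; names in angle brackets are placeholders):
kubectl logs -f <pod_name> -n <namespace>            # follow the log stream
kubectl logs <pod_name> -c <container_name>          # pick one container in a multi-container pod
kubectl logs --tail=100 <pod_name>                   # only the last 100 lines
kubectl logs --previous <pod_name>                   # logs of the previous, crashed instance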
3. Viewing logs with Linux's built-in tools
On Linux, systemd manages the Kubernetes services and journald captures their output, so the logs of a Kubernetes service can be viewed with systemctl status <unit> or journalctl -u <unit> -f. Here the Kubernetes services are the k8s components themselves, installed as systemd units.
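For example, assuming the kubelet and Docker run as the systemd units kubelet.service and docker.service on this setup:
systemctl status kubelet
journalctl -u kubelet -f                    # follow the kubelet log
journalctl -u docker --since "1 hour ago"   # docker daemon logs from the last hour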
Approach 1: deploy a log-collection agent on each Node
· Deploy the log collector as a DaemonSet
· Collect the logs under the node's /var/log and /var/lib/docker/containers/ directories
Approach 2: attach a dedicated log-collection container to each Pod
· Add a log-collection sidecar container to every application Pod and share the log directory through an emptyDir volume so the collector can read it
Approach 3: the application pushes logs directly
This happens outside the scope of k8s: the application inside the container sends its logs straight to a log center, e.g. a Java program can use log4j2 to format the logs and ship them to a remote endpoint. Alternatively, change Docker's --log-driver: different drivers send logs to different destinations, so setting log-driver to syslog, fluentd, splunk or another log-collection service forwards the logs to the remote side.
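A minimal /etc/docker/daemon.json sketch for the log-driver route; the Fluentd address below is a made-up example:
{
  "log-driver": "fluentd",
  "log-opts": {
    "fluentd-address": "192.168.208.190:24224",
    "tag": "docker.{{.Name}}"
  }
}
Restart dockerd afterwards; note that once the driver is no longer json-file, kubectl logs can no longer read those containers' logs.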
Filebeat, Fluentd (/var/log ----> /var/log/containers/)
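On a node you can see how the kubelet wires this up; each file under /var/log/containers is a symlink whose name encodes pod, namespace and container (the link-target format below follows the chart comments further down):
ls -l /var/log/containers/ | head
# <pod>_<namespace>_<container>-<docker_id>.log -> /var/lib/docker/containers/<docker_id>/<docker_id>-json.log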
On the k8s cluster these are deployed per node (note: if log collection itself runs on top of k8s, allocate resources sensibly so EFK does not use everything up and evict the cluster's essential pods).
Fluentd can be deployed as a DaemonSet, hosted on k8s.
Kibana must match the Elasticsearch version (identical version numbers).
helm fetch stable/elasticsearch
tar xvzf elasticsearch-1.32.1.tgz
Create the k8s namespace:
kubectl create ns efk
If you also want to enable Prometheus monitoring, uncomment the corresponding lines (they are commented out by default).
# Edit the properties in values.yaml
# Default values for elasticsearch.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
appVersion: "6.8.2"
## Define serviceAccount names for components. Defaults to component's fully qualified name.
##
serviceAccounts:
client:
create: true
name:
master:
create: true
name:
data:
create: true
name:
## Specify if a Pod Security Policy for node-exporter must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
enabled: false
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
securityContext:
enabled: false
runAsUser: 1000
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: "192.168.208.195/library/elasticsearch/elasticsearch-oss"
tag: "6.8.2"
pullPolicy: "IfNotPresent"
# changed to my own private registry
testFramework:
image: "dduportal/bats"
tag: "0.4.0"
initImage:
repository: "busybox"
tag: "latest"
pullPolicy: "Always"
cluster:
name: "elasticsearch"
# If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want
# enabled in the environment variables outlined in the README
xpackEnable: false
# Some settings must be placed in a keystore, so they need to be mounted in from a secret.
# Use this setting to specify the name of the secret
# keystoreSecret: eskeystore
config: {}
# Custom parameters, as string, to be added to ES_JAVA_OPTS environment variable
additionalJavaOpts: ""
# Command to run at the end of deployment
bootstrapShellCommand: ""
env:
# IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes
# To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible
# node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster.
MINIMUM_MASTER_NODES: "2"
# If memory/resources are tight, change this and the client/master/data replica counts below to 1
plugins: []
# - ingest-attachment
# - mapper-size
loggingYml:
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console
logger:
# log action execution errors for easier debugging
action: DEBUG
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
log4j2Properties: |
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
logger.searchguard.name = com.floragunn
logger.searchguard.level = info
client:
name: client
replicas: 2
serviceType: ClusterIP
## If coupled with serviceType = "NodePort", this will set a specific nodePort to the client HTTP port
# httpNodePort: 30920
loadBalancerIP: {}
loadBalancerSourceRanges: {}
## (dict) If specified, apply these annotations to the client service
# serviceAnnotations:
# example: client-svc-foo
heapSize: "512m"
# additionalJavaOpts: "-XX:MaxRAM=512m"
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
# terminationGracePeriodSeconds: 60
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "1024Mi"
requests:
cpu: "25m"
memory: "512Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each client Pod
# podAnnotations:
# example: client-foo
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
hooks: {}
## (string) Script to execute prior the client pod stops.
# preStop: |-
## (string) Script to execute after the client pod starts.
# postStart: |-
ingress:
enabled: false
# user: NAME
# password: PASSWORD
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
master:
name: master
exposeHttp: false
replicas: 3
heapSize: "512m"
# additionalJavaOpts: "-XX:MaxRAM=512m"
persistence:
enabled: false
accessMode: ReadWriteOnce
name: data
size: "4Gi"
# storageClass: "ssd"
# readinessProbe:
# httpGet:
# path: /_cluster/health?local=true
# port: 9200
# initialDelaySeconds: 5
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
# terminationGracePeriodSeconds: 60
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "1024Mi"
requests:
cpu: "25m"
memory: "512Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each master Pod
# podAnnotations:
# example: master-foo
podManagementPolicy: OrderedReady
podDisruptionBudget:
enabled: false
minAvailable: 2 # Same as `cluster.env.MINIMUM_MASTER_NODES`
# maxUnavailable: 1
updateStrategy:
type: OnDelete
hooks: {}
## (string) Script to execute prior the master pod stops.
# preStop: |-
## (string) Script to execute after the master pod starts.
# postStart: |-
data:
name: data
exposeHttp: false
replicas: 2
heapSize: "1536m"
# additionalJavaOpts: "-XX:MaxRAM=1536m"
persistence:
enabled: false
accessMode: ReadWriteOnce
name: data
size: "30Gi"
# storageClass: "ssd"
# readinessProbe:
# httpGet:
# path: /_cluster/health?local=true
# port: 9200
# initialDelaySeconds: 5
terminationGracePeriodSeconds: 3600
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "2048Mi"
requests:
cpu: "25m"
memory: "1536Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each data Pod
# podAnnotations:
# example: data-foo
podDisruptionBudget:
enabled: false
# minAvailable: 1
maxUnavailable: 1
podManagementPolicy: OrderedReady
updateStrategy:
type: OnDelete
hooks:
## Drain the node before stopping it and re-integrate it into the cluster after start.
## When enabled, it supersedes `data.hooks.preStop` and `data.hooks.postStart` defined below.
drain:
enabled: true
## (string) Script to execute prior the data pod stops. Ignored if `data.hooks.drain.enabled` is true (default)
# preStop: |-
# #!/bin/bash
# exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
# NODE_NAME=${HOSTNAME}
# curl -s -XPUT -H 'Content-Type: application/json' '{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings' -d "{
# \"transient\" :{
# \"cluster.routing.allocation.exclude._name\" : \"${NODE_NAME}\"
# }
# }"
# echo "Node ${NODE_NAME} is excluded from the allocation"
## (string) Script to execute after the data pod starts. Ignored if `data.hooks.drain.enabled` is true (default)
# postStart: |-
# #!/bin/bash
# exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
# NODE_NAME=${HOSTNAME}
# CLUSTER_SETTINGS=$(curl -s -XGET "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings")
# if echo "${CLUSTER_SETTINGS}" | grep -E "${NODE_NAME}"; then
# echo "Activate node ${NODE_NAME}"
# curl -s -XPUT -H 'Content-Type: application/json' "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings" -d "{
# \"transient\" :{
# \"cluster.routing.allocation.exclude._name\" : null
# }
# }"
# fi
# echo "Node ${NODE_NAME} is ready to be used"
## Sysctl init container to setup vm.max_map_count
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
# and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
sysctlInitContainer:
enabled: true
## Chown init container to change ownership of data and logs directories to elasticsearch user
chownInitContainer:
enabled: true
## Additional init containers
extraInitContainers: |
forceIpv6: false
If the data pods keep restarting, disable the health checks (readiness probe).
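In this chart the readinessProbe blocks for master and data are commented out by default (see above); if you have enabled them and the data pods keep getting restarted before the cluster forms, comment them back out or raise initialDelaySeconds. A sketch of the data section in values.yaml:
data:
  # (other data settings unchanged)
  # readinessProbe:
  #   httpGet:
  #     path: /_cluster/health?local=true
  #     port: 9200
  #   initialDelaySeconds: 90   # give Elasticsearch more time before the first check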
helm install gomo-efk . --namespace=efk
[root@k8s-master1 ~]# kubectl get pods -n efk -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
gomo-efk-elasticsearch-client-b494bdcdb-8mjdg 1/1 Running 0 44m 172.24.126.25 k8s-worker2 <none> <none>
gomo-efk-elasticsearch-client-b494bdcdb-drf6x 1/1 Running 0 44m 172.24.24.232 k8s-worker4 <none> <none>
gomo-efk-elasticsearch-data-0 1/1 Running 0 44m 172.24.194.86 k8s-worker1 <none> <none>
gomo-efk-elasticsearch-data-1 1/1 Running 0 44m 172.24.100.226 k8s-worker3 <none> <none>
gomo-efk-elasticsearch-master-0 1/1 Running 0 44m 172.24.24.231 k8s-worker4 <none> <none>
gomo-efk-elasticsearch-master-1 1/1 Running 0 44m 172.24.126.26 k8s-worker2 <none> <none>
gomo-efk-elasticsearch-master-2 1/1 Running 0 44m 172.24.194.87 k8s-worker1 <none> <none>
# Launch a temporary test pod to check from inside the cluster
[root@k8s-master1 ~]# kubectl run cirror-$RANDOM --rm -it --image=cirros -- /bin/sh
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.
/ # nslookup gomo-efk-elasticsearch-client.efk.svc
Server: 10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name: gomo-efk-elasticsearch-client.efk.svc
Address 1: 10.254.114.254 gomo-efk-elasticsearch-client.efk.svc.cluster.local
/ # curl gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200
{
"name" : "gomo-efk-elasticsearch-client-b494bdcdb-8mjdg",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "EklVAWzOTv6YlS-nTUUF7A",
"version" : {
"number" : "6.8.2",
"build_flavor" : "oss",
"build_type" : "docker",
"build_hash" : "b506955",
"build_date" : "2019-07-24T15:24:41.545295Z",
"build_snapshot" : false,
"lucene_version" : "7.7.0",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}
/ # curl gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200/_cat/nodes
172.24.126.25 19 95 17 0.49 0.50 0.50 i - gomo-efk-elasticsearch-client-b494bdcdb-8mjdg
172.24.24.232 20 73 12 0.28 0.64 0.73 i - gomo-efk-elasticsearch-client-b494bdcdb-drf6x
172.24.24.231 18 73 12 0.28 0.64 0.73 mi - gomo-efk-elasticsearch-master-0
172.24.194.87 22 96 12 0.14 0.41 0.52 mi - gomo-efk-elasticsearch-master-2
172.24.100.226 5 96 10 0.17 0.37 0.38 di - gomo-efk-elasticsearch-data-1
172.24.126.26 19 95 15 0.49 0.50 0.50 mi * gomo-efk-elasticsearch-master-1
172.24.194.86 7 96 16 0.14 0.41 0.52 di - gomo-efk-elasticsearch-data-0
/ # curl gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200/_cat/indices
/
# Nothing is indexed yet; fluentd still needs to be installed
Install fluentd-elasticsearch
helm fetch stable/fluentd-elasticsearch
tar xvzf fluentd-elasticsearch-2.0.7.tgz
Edit values.yaml
image:
repository: 192.168.208.195/library/fluentd-elasticsearch
## changed to my private registry
tag: v2.3.2
pullPolicy: IfNotPresent
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 200Mi
# host must be set to your own Elasticsearch client service domain, written out in full
elasticsearch:
host: 'gomo-efk-elasticsearch-client.efk.svc.cluster.local'
port: 9200
scheme: 'http'
ssl_version: TLSv1_2
buffer_chunk_limit: 2M
buffer_queue_limit: 8
logstash_prefix: 'logstash'
# If you want to add custom environment variables, use the env dict
# You can then reference these in your config file e.g.:
# user "#{ENV['OUTPUT_USER']}"
env:
# OUTPUT_USER: my_user
# If you want to add custom environment variables from secrets, use the secret list
secret:
# - name: ELASTICSEARCH_PASSWORD
# secret_name: elasticsearch
# secret_key: password
rbac:
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Specify if a Pod Security Policy for node-exporter must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
enabled: false
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
livenessProbe:
enabled: true
annotations: {}
# enable monitoring
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "24231"
## DaemonSet update strategy
## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
updateStrategy:
type: RollingUpdate
tolerations: {}
# - key: node-role.kubernetes.io/master
# operator: Exists
# effect: NoSchedule
nodeSelector: {}
service:
type: ClusterIP
ports:
- name: "monitor-agent"
port: 24231
configMaps:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record look like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/fluentd-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
format json
read_from_head true
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
<source>
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
<source>
@id journald-docker
@type systemd
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
</source>
<source>
@id journald-kubelet
@type systemd
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id journald-node-problem-detector
@type systemd
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>
<source>
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
@type kubernetes_metadata
</filter>
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
type_name _doc
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
scheme "#{ENV['OUTPUT_SCHEME']}"
ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
logstash_format true
logstash_prefix "#{ENV['LOGSTASH_PREFIX']}"
reconnect_on_error true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action block
</buffer>
</match>
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true
helm install --name gomo-flu --namespace=efk .
[root@k8s-master1 yml]# kubectl get pods -n efk
NAME READY STATUS RESTARTS AGE
gomo-efk-elasticsearch-client-b494bdcdb-8mjdg 1/1 Running 0 27m
gomo-efk-elasticsearch-client-b494bdcdb-drf6x 1/1 Running 0 27m
gomo-efk-elasticsearch-data-0 1/1 Running 0 27m
gomo-efk-elasticsearch-data-1 1/1 Running 0 27m
gomo-efk-elasticsearch-master-0 1/1 Running 0 27m
gomo-efk-elasticsearch-master-1 1/1 Running 0 27m
gomo-efk-elasticsearch-master-2 1/1 Running 0 27m
gomo-flu-fluentd-elasticsearch-2vp4m 1/1 Running 0 12m
gomo-flu-fluentd-elasticsearch-b8wtn 1/1 Running 0 12m
gomo-flu-fluentd-elasticsearch-prstw 1/1 Running 0 11m
gomo-flu-fluentd-elasticsearch-w74j5 1/1 Running 0 11m
# Back in the cirros test pod
/ # curl gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200/_cat/indices
green open logstash-2019.12.18 FeDv-0dCQpuTFg5eTdBCaw 5 1 156 0 620.3kb 450.8kb
green open logstash-2019.12.17 d4sDdsyKSIuKbnaDsh4Uuw 5 1 5 0 122.6kb 87.1kb
green open logstash-2019.12.19 g3_V_2ZHTfSt9mBiKG_D5g 5 1 444 0 949.9kb 708.2kb
yellow open logstash-2019.12.16 OHaEX6I4QvK2sCI2G1DwDA 5 1 2841 0 2.5mb 2mb
yellow open logstash-2019.12.20 U43viRYtQz-AGrVjsJrnww 5 1 838 0 1.1mb 1005.6kb
green open logstash-2019.12.15 jHtwQkHVRI-lm6gIh2VqXQ 5 1 9426 0 8mb 5.4mb
/ # curl gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200/_cat/indices
green open logstash-2019.12.18 FeDv-0dCQpuTFg5eTdBCaw 5 1 207 0 620.3kb 450.8kb
green open logstash-2019.12.17 d4sDdsyKSIuKbnaDsh4Uuw 5 1 5 0 122.6kb 87.1kb
green open logstash-2019.12.19 g3_V_2ZHTfSt9mBiKG_D5g 5 1 495 0 949.9kb 708.2kb
yellow open logstash-2019.12.16 OHaEX6I4QvK2sCI2G1DwDA 5 1 2898 0 2.5mb 2mb
green open logstash-2019.12.20 U43viRYtQz-AGrVjsJrnww 5 1 1063 0 1.1mb 1005.6kb
green open logstash-2019.12.15 jHtwQkHVRI-lm6gIh2VqXQ 5 1 9426 0 8mb 5.4mb
Connection succeeded; logs are being shipped into Elasticsearch normally.
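To see what is actually being indexed, you can query one of the logstash-* indices from the cirros test pod (a sketch; the kubernetes.* fields come from the kubernetes_metadata filter):
/ # curl 'gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200/logstash-*/_search?q=kubernetes.namespace_name:kube-system&size=1&pretty'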
Finally, fetch the Kibana web UI
helm fetch stable/kibana
tar zxvf kibana-3.2.5.tgz
# Edit the values.yaml configuration
image:
repository: "192.168.208.195/library/kibana/kibana-oss"
tag: "6.8.2"
pullPolicy: "IfNotPresent"
testFramework:
enabled: "true"
image: "dduportal/bats"
tag: "0.4.0"
commandline:
args: []
env: {}
## All Kibana configuration options are adjustable via env vars.
## To adjust a config option to an env var uppercase + replace `.` with `_`
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
## For kibana < 6.6, use ELASTICSEARCH_URL instead
# ELASTICSEARCH_HOSTS: http://elasticsearch-client:9200
# SERVER_PORT: 5601
# LOGGING_VERBOSE: "true"
# SERVER_DEFAULTROUTE: "/app/kibana"
envFromSecrets: {}
## Create a secret manually. Reference it here to inject environment variables
# ELASTICSEARCH_USERNAME:
# from:
# secret: secret-name-here
# key: ELASTICSEARCH_USERNAME
# ELASTICSEARCH_PASSWORD:
# from:
# secret: secret-name-here
# key: ELASTICSEARCH_PASSWORD
files:
kibana.yml:
## Default Kibana configuration from kibana-docker.
server.name: kibana
server.host: "0"
## For kibana < 6.6, use elasticsearch.url instead
elasticsearch.hosts: http://gomo-efk-elasticsearch-client.efk.svc.cluster.local:9200
#### The key change: point this at the Elasticsearch client service URL with port 9200
## Custom config properties below
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
# server.port: 5601
# logging.verbose: "true"
# server.defaultRoute: "/app/kibana"
deployment:
annotations: {}
## expose the port (NodePort)
service:
type: NodePort
# clusterIP: None
# portName: kibana-svc
externalPort: 443
internalPort: 5601
# authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
#
## LoadBalancer IP if service.type is LoadBalancer
## Default: nil
##
# loadBalancerIP: 10.2.2.2
annotations: {}
# Annotation example: setup ssl with aws cert when service.type is LoadBalancer
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
labels: {}
## Label example: show service URL in `kubectl cluster-info`
# kubernetes.io/cluster-service: "true"
## Limit load balancer source ips to list of CIDRs (where available)
# loadBalancerSourceRanges: []
selector: {}
ingress:
enabled: false
# hosts:
# - kibana.localhost.localdomain
# - localhost.localdomain/kibana
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
serviceAccount:
# Specifies whether a service account should be created
create: false
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# If set and create is false, the service account must be existing
name:
livenessProbe:
enabled: false
path: /status
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
enabled: false
path: /status
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 5
# Enable an authproxy. Specify container in extraContainers
authProxyEnabled: false
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - --resource=uri=/*
# - --discovery-url=https://discovery-url
# - --client-id=client
# - --client-secret=secret
# - --listen=0.0.0.0:5602
# - --upstream-url=http://127.0.0.1:5601
# ports:
# - name: web
# containerPort: 9090
extraVolumeMounts: []
extraVolumes: []
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
priorityClassName: ""
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
podAnnotations: {}
replicaCount: 1
revisionHistoryLimit: 3
# Custom labels for pod assignment
podLabels: {}
# To export a dashboard from a running Kibana 6.3.x use:
# curl --user <username>:<password> -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json
# A dashboard is defined by a name and a string with the json payload or the download url
dashboardImport:
enabled: false
timeout: 60
xpackauth:
enabled: false
username: myuser
password: mypass
dashboards: {}
# k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json
# List of plugins to install using initContainer
# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well.
plugins:
# set to true to enable plugins installation
enabled: false
# set to true to remove all kibana plugins before installation
reset: false
# Use <plugin_name,version,url> to add/upgrade plugin
values:
# - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip
# - logtrail,0.1.31,https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-6.6.0-0.1.31.zip
# - other_plugin
persistentVolumeClaim:
# set to true to use pvc
enabled: false
# set to true to use you own pvc
existingClaim: false
annotations: {}
accessModes:
- ReadWriteOnce
size: "5Gi"
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# default security context
securityContext:
enabled: false
allowPrivilegeEscalation: false
runAsUser: 1000
fsGroup: 2000
extraConfigMapMounts: []
# - name: logtrail-configs
# configMap: kibana-logtrail
# mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json
# subPath: logtrail.json
# Add your own init container or uncomment and modify the given example.
initContainers: {}
## Don't start kibana till Elasticsearch is reachable.
## Ensure that it is available at http://elasticsearch:9200
##
# es-check: # <- will be used as container name
# image: "appropriate/curl:latest"
# imagePullPolicy: "IfNotPresent"
# command:
# - "/bin/sh"
# - "-c"
# - |
# is_down=true
# while "$is_down"; do
# if curl -sSf --fail-early --connect-timeout 5 http://elasticsearch:9200; then
# is_down=false
# else
# sleep 5
# fi
# done
helm install gomo-kabana --namespace=efk .
[root@k8s-master1 yml]# kubectl get pods -n efk
NAME READY STATUS RESTARTS AGE
gomo-efk-elasticsearch-client-b494bdcdb-8mjdg 1/1 Running 0 27m
gomo-efk-elasticsearch-client-b494bdcdb-drf6x 1/1 Running 0 27m
gomo-efk-elasticsearch-data-0 1/1 Running 0 27m
gomo-efk-elasticsearch-data-1 1/1 Running 0 27m
gomo-efk-elasticsearch-master-0 1/1 Running 0 27m
gomo-efk-elasticsearch-master-1 1/1 Running 0 27m
gomo-efk-elasticsearch-master-2 1/1 Running 0 27m
gomo-flu-fluentd-elasticsearch-2vp4m 1/1 Running 0 12m
gomo-flu-fluentd-elasticsearch-b8wtn 1/1 Running 0 12m
gomo-flu-fluentd-elasticsearch-prstw 1/1 Running 0 11m
gomo-flu-fluentd-elasticsearch-w74j5 1/1 Running 0 11m
gomo-kabana-kibana-bfbbb85c-hp82w 1/1 Running 0 8s
Open a browser and go to NodeIP plus the NodePort.
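The port is whatever NodePort the Kibana Service was given; look it up first (the service name is assumed to follow the release name):
kubectl get svc -n efk
# find the gomo-kabana-kibana line, e.g.  NodePort  ...  443:3xxxx/TCP
# then browse to http://<any_node_ip>:<that_node_port>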
(There are many choices for the "L" in ELK; look them up yourself. This section focuses on EFK.)
· Logging agent: collects log data from the containers; here it is Fluentd.
· Logging backend: stores and processes the logs pushed over by the agents; here it is Elasticsearch.
· Log display: presents a unified view of the logs to users; here it is Kibana.
· Kubernetes provides an Elasticsearch add-on that bundles Elasticsearch, Fluentd and Kibana. Elasticsearch is a search engine responsible for storing the logs and making them queryable; Fluentd collects log messages from Kubernetes and sends them to Elasticsearch; Kibana is a graphical interface for viewing and querying the logs stored in Elasticsearch.
https://blog.csdn.net/luanpeng825485697/article/details/83312662
https://www.cnblogs.com/Dev0ps/p/10778962.html
Nginx-configmap.yaml:
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-nginx-config
data:
filebeat.yml: |-
filebeat.prospectors:
- type: log
paths:
- /var/log/nginx/access.log
# tags: ["access"]
fields:
app: www
type: nginx-access
fields_under_root: true
- type: log
paths:
- /var/log/nginx/error.log
# tags: ["error"]
fields:
app: www
type: nginx-error
fields_under_root: true
output.logstash:
hosts: ['192.168.208.190:5044']
Nginx-deployment.yaml:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: nginx-demo
spec:
replicas: 3
selector:
matchLabels:
project: www
app: www
template:
metadata:
labels:
project: www
app: www
spec:
imagePullSecrets:
- name: registry-pull-secret
containers:
- name: nginx
image: 192.168.208.195:80/library/nginx:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
protocol: TCP
resources:
requests:
cpu: 0.5
memory: 256Mi
limits:
cpu: 1
memory: 1Gi
livenessProbe:
httpGet:
path: /index.html
port: 80
initialDelaySeconds: 6
timeoutSeconds: 20
volumeMounts:
- name: nginx-logs
mountPath: /var/log/nginx/
- name: filebeat
image: 192.168.208.195:80/library/filebeat:6.4.2
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
runAsUser: 0
volumeMounts:
- name: filebeat-config
mountPath: /etc/filebeat.yml
subPath: filebeat.yml
- name: nginx-logs
mountPath: /var/log/nginx/
volumes:
- name: nginx-logs
emptyDir: {}
- name: filebeat-config
configMap:
name: filebeat-nginx-config
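Apply both manifests and check that the filebeat sidecar connects to Logstash (filenames assumed to match the headings above):
kubectl apply -f Nginx-configmap.yaml
kubectl apply -f Nginx-deployment.yaml
kubectl get pods -l app=www
kubectl logs <nginx_pod_name> -c filebeat    # should show a successful connection to 192.168.208.190:5044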
cat /etc/logstash/conf.d/logstash-to-es.conf
input {
beats {
port => 5044
}
}
filter {
}
output {
if [app] == "www" {
if [type] == "nginx-access" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "nginx-access-%{+YYYY.MM.dd}"
}
}
else if [type] == "nginx-error" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "nginx-error-%{+YYYY.MM.dd}"
}
}
} else if [app] == "k8s" {
if [type] == "module" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "k8s-log-%{+YYYY.MM.dd}"
}
}
}
# stdout { codec=> rubydebug }
}
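On the Logstash host (192.168.208.190), reload the pipeline and confirm the new indices show up in its Elasticsearch (a sketch, assuming Logstash runs as a systemd service):
systemctl restart logstash
curl http://127.0.0.1:9200/_cat/indices | grep nginx
# expect nginx-access-YYYY.MM.dd and nginx-error-YYYY.MM.dd once requests hit the pods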
Same pattern as the nginx example above; see https://www.cnblogs.com/Dev0ps/p/10778962.html for details.
tomcat-deployment.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
name: tomcat-java-demo
spec:
replicas: 3
selector:
matchLabels:
project: www
app: www
template:
metadata:
labels:
project: www
app: www
spec:
imagePullSecrets:
- name: registry-pull-secret
containers:
- name: tomcat
image: 192.168.208.195:80/library/tomcat:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
name: web
protocol: TCP
resources:
requests:
cpu: 0.5
memory: 1Gi
limits:
cpu: 1
memory: 2Gi
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 20
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 60
timeoutSeconds: 20
volumeMounts:
- name: tomcat-logs
mountPath: /usr/local/tomcat/logs
- name: filebeat
image: docker.elastic.co/beats/filebeat:6.4.2
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
runAsUser: 0
volumeMounts:
- name: filebeat-config
mountPath: /etc/filebeat.yml
subPath: filebeat.yml
- name: tomcat-logs
mountPath: /usr/local/tomcat/logs
volumes:
- name: tomcat-logs
emptyDir: {}
- name: filebeat-config
configMap:
name: filebeat-config
filebeat-tomcat-configmap.yaml:
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
data:
filebeat.yml: |-
filebeat.prospectors:
- type: log
paths:
- /usr/local/tomcat/logs/catalina.*
# tags: ["tomcat"]
fields:
app: www
type: tomcat-catalina
fields_under_root: true
multiline:
pattern: '^\['
negate: true
match: after
output.logstash:
hosts: ['192.168.208.190:5044']
[root@master logs]# cat /etc/logstash/conf.d/logstash-to-es.conf
input {
beats {
port => 5044
}
}
filter {
}
output {
if [app] == "www" {
if [type] == "nginx-access" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "nginx-access-%{+YYYY.MM.dd}"
}
}
else if [type] == "nginx-error" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "nginx-error-%{+YYYY.MM.dd}"
}
}
else if [type] == "tomcat-catalina" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "tomcat-catalina-%{+YYYY.MM.dd}"
}
}
} else if [app] == "k8s" {
if [type] == "module" {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "k8s-log-%{+YYYY.MM.dd}"
}
}
}
# stdout { codec=> rubydebug }
}
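Same flow for the Tomcat example: apply the ConfigMap and Deployment, then watch for the tomcat-catalina index (filenames assumed; the curl runs on the Logstash/Elasticsearch host):
kubectl apply -f filebeat-tomcat-configmap.yaml
kubectl apply -f tomcat-deployment.yaml
curl http://127.0.0.1:9200/_cat/indices | grep tomcat-catalina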