docker-compose-部署-logstash
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
# Single-node Logstash deployment; pipeline and settings files are
# bind-mounted from /opt/logstash/config on the host.
version: "3"
services:
  logstash-k8s:
    hostname: logstash-k8s
    image: logstash:7.6.1
    restart: always
    volumes:
      # Share the host timezone with the container; read-only so the
      # container cannot alter the host clock file.
      - /etc/localtime:/etc/localtime:ro
      # Pipeline definition (input/filter/output).
      - /opt/logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      # Node-level settings (http.host, monitoring, ...).
      - /opt/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
logstash.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# Pipeline: consume JSON log events from every Kafka topic matching
# "k8s-prod-.*" and index them into Elasticsearch with one index per day.
input {
  kafka {
    bootstrap_servers => "172.16.0.***:9092"
    topics_pattern    => "k8s-prod-.*"
    consumer_threads  => 12
    # decorate_events adds [@metadata][kafka] fields (topic, partition,
    # offset) to each event, so the topic name can be reused downstream.
    decorate_events   => true
    codec             => "json"
    auto_offset_reset => "latest"
    group_id          => "logstash-logs-k8s-prod"
  }
}

# Example filter steps kept by the author for reference (disabled):
#filter {
#  ruby {
#    code => "event.timestamp.time.localtime"
#  }
#
#  mutate {
#    remove_field => ["beat"]
#  }
#
#  grok {
#    match => {"message" => "\[(?<time>\d+-\d+-\d+\s\d+:\d+:\d+)\] \[(?<level>\w+)\] (?<thread>[\w|-]+) (?<class>[\w|\.]+) (?<lineNum>\d+):(?<msg>.+)"}
#  }
#}

filter {
  # Disabled example: route events to an index named after the source topic.
  # mutate {
  #   add_field => {"[@metadata][index]" => "%{[kafka][topic]}"}
  # }
}

output {
  elasticsearch {
    hosts    => [ "172.16.0.***:9200" ]
    user     => "elastic"
    # NOTE(review): plaintext credentials in the pipeline file — prefer the
    # Logstash keystore or environment-variable substitution.
    password => "p5a&Y$hmyYvoV2TN"
    # Elasticsearch index names must be lowercase.
    index    => "k8s-prod-logs-%{+YYYY-MM-dd}"
  }
  # Optional debug output: uncomment to print every event fetched from
  # Kafka to stdout (noisy; leave disabled in production).
  # stdout {
  #   codec => rubydebug
  # }
}
logstash.yml
1
2
# Bind the Logstash HTTP API (default port 9600) to all interfaces so it is
# reachable from outside the container.
http.host: "0.0.0.0"
# Left disabled by the author. NOTE(review): `xpack.monitoring.elasticsearch.url`
# is the pre-7.x key; current 7.x releases use
# `xpack.monitoring.elasticsearch.hosts` — confirm before enabling.
#xpack.monitoring.elasticsearch.url: http://172.16.0.***:9200
本文由作者按照 CC BY 4.0 进行授权