1.logstash
1.各分布式项目下创建logback-logstash.xml配置文件
<?xml version="1.0" encoding="UTF-8"?>
<!--
  logback-logstash.xml: per-service logback configuration that ships log events
  to a Logstash TCP input. The destination host:port is read from the Spring
  property "logstash.service" (set in bootstrap.yml).
-->
<configuration>
    <!-- Spring Boot's logback defaults (console appender, standard properties) -->
    <include resource="org/springframework/boot/logging/logback/base.xml"/>
    <!-- Bridge Spring environment properties into logback context scope -->
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>
    <springProperty scope="context" name="logstashService" source="logstash.service"/>
    <!-- Console output pattern.
         FIX: removed the stray trailing '}' that followed
         ${LOG_EXCEPTION_CONVERSION_WORD:-%wEx} in the original — it was printed
         as a literal brace on every console line. -->
    <property name="CONSOLE_LOG_PATTERN"
              value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
    <!-- Appender 1: default LogstashEncoder JSON over TCP -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>${logstashService}</destination>
        <!-- async ring-buffer capacity for the TCP appender -->
        <queueSize>1048576</queueSize>
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>
    </appender>
    <!-- Appender 2: composite JSON with explicit Sleuth trace fields.
         NOTE(review): both appenders target the same ${logstashService}, so every
         event is shipped twice — confirm the duplication is intentional. -->
    <appender name="LOGSTASHJSON"
              class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>${logstashService}</destination>
        <queueSize>1048576</queueSize>
        <!-- JSON event layout: timestamp provider + templated Sleuth/MDC fields -->
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>
    <!-- Ship INFO and above through both appenders -->
    <root level="INFO">
        <appender-ref ref="LOGSTASH"/>
        <appender-ref ref="LOGSTASHJSON"/>
    </root>
</configuration>
在 bootstrap.yml 中添加 xml 配置所需的 logstashService 服务器地址和端口
logstash:
service: 192.168.11.44:9600
2.linux logstash安装服务器
/etc/logstash/logstash.conf 文件配置
# logstash.conf: receive JSON log events over TCP and index them into Elasticsearch.
input {
  tcp {
    mode  => "server"
    host  => "0.0.0.0"
    port  => 9600
    codec => json
  }
}
filter {
  grok {
    # Parse a Spring Boot + Sleuth formatted log line.
    # FIXES relative to the original pattern:
    #   - TIMESTAMP_IOS08601 -> TIMESTAMP_ISO8601 (typo; the original name is not
    #     a defined grok pattern, so the pipeline failed to compile)
    #   - [%DATA:service     -> \[%{DATA:service} (missing %{...} braces, and the
    #     literal '[' must be escaped inside a grok pattern)
    #   - exporttable        -> exportable (matches the field name emitted by the
    #     logback JSON encoder)
    #   - literal '+' separators -> \s+ (the log line fields are separated by
    #     whitespace, not plus signs)
    match => {
      "message" => "%{TIMESTAMP_ISO8601:timestamp}\s+%{LOGLEVEL:severity}\s+\[%{DATA:service},%{DATA:trace},%{DATA:span},%{DATA:exportable}\]\s+%{DATA:pid}\s+---\s+\[%{DATA:thread}\]\s+%{DATA:class}\s+:\s+%{GREEDYDATA:rest}"
    }
  }
}
output {
  elasticsearch {
    hosts => "192.168.11.32:9200"
    # daily index; Kibana index pattern "serverlog*" matches these
    index => "serverlog-11.41-%{+YYYY.MM.dd}"
  }
}
运行logstash
/usr/share/logstash/bin/./logstash -f /etc/logstash/logstash.conf >/dev/null &
2.Elasticsearch
3.kibana
配置上logstash conf中的serverlog* 即可查看到对应的log