To make logs easier to inspect, this post shows how to integrate Logback with Logstash via logstash-logback-encoder; Logstash can then write the log events into Elasticsearch.
1. Integrate Logback
Add the dependency to the pom; the ${logstash-logback-encoder.version} placeholder needs to be defined in the pom's <properties> section:
<!-- logstash integration -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>${logstash-logback-encoder.version}</version>
</dependency>
2. Configure the Logstash appender in Logback
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- host and port of the reachable Logstash log collection endpoint -->
<destination>ip:port</destination>
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<pattern>
<pattern>
{
"app_name":"${APP_NAME}",
"traceid":"%X{traceid}",
"ip": "%X{ip}",
"server_name": "%X{server_name}",
"level": "%level",
"trace": "%X{X-B3-TraceId:-}",
"span": "%X{X-B3-SpanId:-}",
"parent": "%X{X-B3-ParentSpanId:-}",
"thread": "%thread",
"class": "%logger{40} - %M:%L",
"message": "%message",
"stack_trace": "%exception{10}"
}
</pattern>
</pattern>
</providers>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ASYNC_ROLLING_FILE"/>
<appender-ref ref="LOGSTASH"/>
</root>
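The JSON pattern above pulls traceid, ip and server_name out of the MDC via %X{...}, so the application has to put those values there itself (the X-B3-* keys are normally populated by Spring Cloud Sleuth when it is on the classpath). Below is a minimal sketch of a servlet filter that fills them in, assuming Spring Boot 2.x with javax.servlet; the class name and the chosen values are illustrative only:

import java.io.IOException;
import java.net.InetAddress;
import java.util.UUID;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;

import org.slf4j.MDC;
import org.springframework.stereotype.Component;

// Hypothetical filter that fills the MDC keys referenced by
// %X{traceid}, %X{ip} and %X{server_name} in the encoder pattern.
@Component
public class MdcLoggingFilter implements Filter {

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        try {
            MDC.put("traceid", UUID.randomUUID().toString());                 // example trace id
            MDC.put("ip", InetAddress.getLocalHost().getHostAddress());       // local IP address
            MDC.put("server_name", InetAddress.getLocalHost().getHostName()); // local host name
            chain.doFilter(request, response);
        } finally {
            MDC.clear(); // threads are pooled, so always clean up the MDC
        }
    }
}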
The appender definition and the root-logger reference above are the key additions. The complete logback-spring.xml is as follows:
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<springProperty scope="context" name="APP_NAME" source="spring.application.name" defaultValue="undefinedAppName"/>
<!-- <include resource="org/springframework/boot/logging/logback/base.xml"/> -->
<jmxConfigurator/>
<logger name="org.springframework.web" level="INFO"/>
<logger name="org.apache.velocity.runtime.log" level="INFO"/>
<!-- console output -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<Pattern>[%X{traceid}] %d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %M:%L - %msg %n</Pattern>
</encoder>
</appender>
<appender name="dailyRollingFileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>logs/logback-today-${APP_NAME}.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- daily rolling over -->
<FileNamePattern>logs/${APP_NAME}.%d{yyyy-MM-dd}.log</FileNamePattern>
<!-- keep 30 days' log history -->
<maxHistory>30</maxHistory>
</rollingPolicy>
<encoder>
<Pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %msg %n</Pattern>
</encoder>
</appender>
<!-- write to file asynchronously -->
<appender name="ASYNC_ROLLING_FILE" class="ch.qos.logback.classic.AsyncAppender">
<!-- do not drop logs; by default, TRACE, DEBUG and INFO events are discarded once the queue is 80% full -->
<discardingThreshold>0</discardingThreshold>
<!-- queue depth; this value affects performance, the default is 256 -->
<queueSize>256</queueSize>
<!-- attach the downstream appender; at most one may be attached -->
<appender-ref ref="dailyRollingFileAppender"/>
</appender>
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- host and port of the reachable Logstash log collection endpoint -->
<destination>ip:port</destination>
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>Asia/Shanghai</timeZone>
</timestamp>
<pattern>
<pattern>
{
"app_name":"${APP_NAME}",
"traceid":"%X{traceid}",
"ip": "%X{ip}",
"server_name": "%X{server_name}",
"level": "%level",
"trace": "%X{X-B3-TraceId:-}",
"span": "%X{X-B3-SpanId:-}",
"parent": "%X{X-B3-ParentSpanId:-}",
"thread": "%thread",
"class": "%logger{40} - %M:%L",
"message": "%message",
"stack_trace": "%exception{10}"
}
</pattern>
</pattern>
</providers>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ASYNC_ROLLING_FILE"/>
<appender-ref ref="LOGSTASH"/>
</root>
</configuration>
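With this file in place, any ordinary SLF4J log call is encoded as JSON and shipped to the configured destination. The controller below is a hypothetical example, only to illustrate which fields end up in the document; the error call also exercises the stack_trace provider:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical endpoint used only to demonstrate the log output.
@RestController
public class DemoLogController {

    private static final Logger log = LoggerFactory.getLogger(DemoLogController.class);

    @GetMapping("/log-demo")
    public String logDemo() {
        log.info("demo endpoint called");   // becomes the "message" field
        try {
            throw new IllegalStateException("boom");
        } catch (IllegalStateException e) {
            log.error("demo failure", e);   // exception is rendered into "stack_trace"
        }
        return "ok";
    }
}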
3. Logstash configuration (the tcp input port here must match the destination configured in the Logback appender)
input {
tcp {
mode => "server"
host => "0.0.0.0"
port => 4560
codec => json_lines
}
}
output {
elasticsearch {
hosts => "es:9200"
index => "springboot-logstash-%{+YYYY.MM.dd}"