为了方便地查看日志,本次介绍 Logback 日志集成 logstash。通过 logstash 可以将日志写入到 Elasticsearch。
1、整合logback
添加pom
<!-- Integrate logstash: provides LogstashEncoder, LoggingEventCompositeJsonEncoder
     and LogstashTcpSocketAppender used in the logback config below.
     The version is resolved from the `logstash-logback-encoder.version` property
     declared elsewhere in the POM. -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>${logstash-logback-encoder.version}</version>
</dependency>
2、logback配置logstash
<!-- Ship log events as JSON to logstash over TCP -->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
    <!-- Reachable logstash log-collection endpoint (host:port of the tcp input) -->
    <destination>ip:port</destination>
    <!-- Exactly ONE encoder is allowed per appender. The original snippet also
         declared a plain LogstashEncoder here, which conflicts with (and is
         overridden by) this composite encoder; it has been removed. -->
    <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
        <providers>
            <!-- Emit the event timestamp in the China Standard Time zone -->
            <timestamp>
                <timeZone>Asia/Shanghai</timeZone>
            </timestamp>
            <!-- Custom JSON fields; %X{...} values come from the MDC -->
            <pattern>
                <pattern>
                {
                "app_name":"${APP_NAME}",
                "traceid":"%X{traceid}",
                "ip": "%X{ip}",
                "server_name": "%X{server_name}",
                "level": "%level",
                "trace": "%X{X-B3-TraceId:-}",
                "span": "%X{X-B3-SpanId:-}",
                "parent": "%X{X-B3-ParentSpanId:-}",
                "thread": "%thread",
                "class": "%logger{40} - %M:%L",
                "message": "%message",
                "stack_trace": "%exception{10}"
                }
                </pattern>
            </pattern>
        </providers>
    </encoder>
</appender>
<root level="INFO">
    <appender-ref ref="STDOUT"/>
    <appender-ref ref="ASYNC_ROLLING_FILE"/>
    <appender-ref ref="LOGSTASH"/>
</root>
主要是这两段。完整的 logback-spring.xml 配置如下:
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Resolve the application name from the Spring environment for use in file names and JSON fields -->
    <springProperty scope="context" name="APP_NAME" source="spring.application.name" defaultValue="undefinedAppName"/>
    <!-- <include resource="org/springframework/boot/logging/logback/base.xml"/> -->
    <jmxConfigurator/>

    <logger name="org.springframework.web" level="INFO"/>
    <logger name="org.apache.velocity.runtime.log" level="INFO"/>

    <!-- Console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- NOTE: %M and %L need caller data and are relatively expensive -->
            <Pattern>[%X{traceid}] %d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %M:%L - %msg %n</Pattern>
        </encoder>
    </appender>

    <!-- Daily rolling log file -->
    <appender name="dailyRollingFileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <File>logs/logback-today-${APP_NAME}.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- daily rolling over -->
            <FileNamePattern>logs/${APP_NAME}.%d{yyyy-MM-dd}.log</FileNamePattern>
            <!-- keep 30 days' log history -->
            <maxHistory>30</maxHistory>
        </rollingPolicy>
        <encoder>
            <Pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %msg %n</Pattern>
        </encoder>
    </appender>

    <!-- Asynchronous file writing -->
    <appender name="ASYNC_ROLLING_FILE" class="ch.qos.logback.classic.AsyncAppender">
        <!-- Never drop events: by default, once the queue is 80% full,
             TRACE/DEBUG/INFO-level events are discarded -->
        <discardingThreshold>0</discardingThreshold>
        <!-- Queue depth; affects performance. Default is 256 -->
        <queueSize>256</queueSize>
        <!-- Delegate appender; at most one may be attached -->
        <appender-ref ref="dailyRollingFileAppender"/>
    </appender>

    <!-- Ship log events as JSON to logstash over TCP -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- Reachable logstash log-collection endpoint (host:port of the tcp input) -->
        <destination>ip:port</destination>
        <!-- Exactly ONE encoder is allowed per appender. The original config also
             declared a plain LogstashEncoder here, which conflicts with (and is
             overridden by) this composite encoder; it has been removed. -->
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <!-- Emit the event timestamp in the China Standard Time zone -->
                <timestamp>
                    <timeZone>Asia/Shanghai</timeZone>
                </timestamp>
                <!-- Custom JSON fields; %X{...} values come from the MDC -->
                <pattern>
                    <pattern>
                    {
                    "app_name":"${APP_NAME}",
                    "traceid":"%X{traceid}",
                    "ip": "%X{ip}",
                    "server_name": "%X{server_name}",
                    "level": "%level",
                    "trace": "%X{X-B3-TraceId:-}",
                    "span": "%X{X-B3-SpanId:-}",
                    "parent": "%X{X-B3-ParentSpanId:-}",
                    "thread": "%thread",
                    "class": "%logger{40} - %M:%L",
                    "message": "%message",
                    "stack_trace": "%exception{10}"
                    }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="ASYNC_ROLLING_FILE"/>
        <appender-ref ref="LOGSTASH"/>
    </root>
</configuration>
3、logstash配置
# Listen for JSON-lines log events sent by LogstashTcpSocketAppender
input{
tcp{
mode=>"server"
host=>"0.0.0.0"
# Must match the <destination> port configured in logback-spring.xml
port=>4560
# One JSON document per line, as produced by the logback JSON encoder
codec=>json_lines
}
}
# Index each event into Elasticsearch, one index per day
output{
elasticsearch{
hosts=>"es:9200"
index=>"springboot-logstash-%{+YYYY.MM.dd}"
}
}