-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcontent.json
More file actions
1 lines (1 loc) · 420 KB
/
content.json
File metadata and controls
1 lines (1 loc) · 420 KB
1
[{"title":"日志体系","date":"2021-08-15T08:41:00.000Z","path":"2021/08/15/日志体系/","text":"日志体系 主题 关注点 公共日志库starter 初始化标准日志行为 基础日志框架 SLF4J作为日志门面。logback是spring提供的默认实现。Log4j2可以代替logback,通过依赖更新。 日志实现 SpringBoot可以配置细粒度日志配置。默认日志是INFO,包括打印WARN和ERROR级别。生成环境不可开启debug级别日志。TraceID必须在每个日志出现。 日志配置 公共库提供了common appender 配置。Debug和Error必须进入严格日志,INFO和WARN使用正常日志即可。 日志追踪 日志追踪支持Servlet阻塞式和异步模式,响应式reactive编程。 日志库 功能 描述 自动配置 通过Springboot自动配置完成日志自动配置,提供给公共配置。 Logback Logback应该使用,由springboot默认提供,是目前非常推荐的日志框架。 Code with SLF4J SLF4J classes应该在平时开发作为日志门面,当我们切换日志实现框架,将不会影响代码。 TraceID TraceID通过UUID生成,每一次打印日志自动带上, 打印日志格式 如果Http header没有携带TraceID,在filter里面自动生成到header。 默认日志过滤器 在公共库提供filter打印请求花费时间,记录聚合服务和调用基础服务的时间开销,调用开始记录时间start和traceId,调用结束时候记录finish结束时间和HTTP Status,同时支持Servlet和reactive。 MDC MDC提供公共的日志信息在logging context,在线程的局部本地变量,跨线程或者异步多线程时候应复制上下文。 日志规范 Fitler中强制生成TraceID,并注入到MDC日志上下文,并调用时间统计。 前后端error schema 后端日志必须打印TraceId,但发生error接口返回TraceId,根据固定的error JSON schema给前端放在报错页面指导用户找客服反馈时候,一定要提供该TraceId和Code。 1234567891011{ \"errorInfo\": [ { \"traceId\": \"53cb73a6-f594-4af6-b406-172b3b4e9dd5\", \"code\": \"RAML_1002\", \"causes\": [ \"Missing 'version'\" ] } ]} Servlet日志过滤器 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980package com.example.demo.log;import org.slf4j.Logger;import org.slf4j.LoggerFactory;import org.slf4j.MDC;import org.springframework.web.filter.OncePerRequestFilter;import javax.servlet.AsyncEvent;import javax.servlet.AsyncListener;import javax.servlet.FilterChain;import javax.servlet.ServletException;import javax.servlet.http.HttpServletRequest;import javax.servlet.http.HttpServletResponse;import java.io.IOException;import java.time.Duration;import java.time.LocalDateTime;import java.util.Map;import java.util.UUID;public class ServletLoggingRequestFilter extends OncePerRequestFilter { private static final Logger log = LoggerFactory.getLogger(ServletLoggingRequestFilter.class); 
public static final String REQUEST_TRACE_ID = \"Request-Trace-Id\"; @Override protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException { CacheHeaderHTTPServletRequest cacheHeaderHTTPServletRequest = new CacheHeaderHTTPServletRequest(request); LocalDateTime startTime = LocalDateTime.now(); String traceId = cacheHeaderHTTPServletRequest.getHeader(REQUEST_TRACE_ID); if (traceId == null) {//生成traceId traceId = UUID.randomUUID().toString(); cacheHeaderHTTPServletRequest.putHeaders(REQUEST_TRACE_ID, traceId);; MDC.put(REQUEST_TRACE_ID, traceId); } if (log.isInfoEnabled()) { log.info(\"start={} traceId={}\", startTime, traceId); } Map<String, String> contextMap = MDC.getCopyOfContextMap(); try { filterChain.doFilter(cacheHeaderHTTPServletRequest, response); } finally { if (cacheHeaderHTTPServletRequest.isAsyncStarted() && cacheHeaderHTTPServletRequest.isAsyncSupported()) { cacheHeaderHTTPServletRequest.getAsyncContext().addListener(new AsyncListener() { @Override public void onComplete(AsyncEvent event) throws IOException { exitLog(contextMap, startTime, response); } @Override public void onTimeout(AsyncEvent event) throws IOException { exitLog(contextMap, startTime, response); } @Override public void onError(AsyncEvent event) throws IOException { exitLog(contextMap, startTime, response); } @Override public void onStartAsync(AsyncEvent event) throws IOException { } }); } else { exitLog(contextMap, startTime, response); } } } private void exitLog(Map<String, String> contextMap, LocalDateTime startTime, HttpServletResponse response) { MDC.setContextMap(contextMap); LocalDateTime endDateTime = LocalDateTime.now(); long betweenTime = Duration.between(startTime, endDateTime).toMillis(); if (log.isInfoEnabled()) { log.info(\"finish={} status={}\", betweenTime, response.getStatus()); } }} 
Reactive日志过滤器12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849package com.example.demo.log;import org.slf4j.Logger;import org.slf4j.LoggerFactory;import org.slf4j.MDC;import org.springframework.util.CollectionUtils;import org.springframework.web.server.ServerWebExchange;import org.springframework.web.server.WebFilter;import org.springframework.web.server.WebFilterChain;import java.time.Duration;import java.time.LocalDateTime;import java.util.Collections;import java.util.List;import java.util.Objects;import java.util.UUID;public class ReactiveLoggingRequestFilter implements WebFilter { private static final Logger log = LoggerFactory.getLogger(ReactiveLoggingRequestFilter.class); public static final String REQUEST_TRACE_ID = \"Request-Trace-Id\"; @Override public reactor.core.publisher.Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) { LocalDateTime startTime = LocalDateTime.now(); List<String> traceIds = exchange.getRequest().getHeaders().get(REQUEST_TRACE_ID); if (CollectionUtils.isEmpty(traceIds)) { String traceId = UUID.randomUUID().toString(); traceIds = Collections.singletonList(traceId); exchange.getRequest().getHeaders().put(REQUEST_TRACE_ID, traceIds); MDC.put(REQUEST_TRACE_ID, traceId); } List<String> finalTraceIds = traceIds; return chain.filter(exchange).doOnSubscribe(subscription -> { if (log.isInfoEnabled()) { log.info(\"start={} traceId={}\", startTime, finalTraceIds.get(0)); } }).doFinally(signalType -> { int status = Objects.requireNonNull(exchange.getResponse().getStatusCode()).value(); LocalDateTime endDateTime = LocalDateTime.now(); long betweenTime = Duration.between(startTime, endDateTime).toMillis(); if (log.isInfoEnabled()) { log.info(\"finish={} status={}\", betweenTime, status); } }); }} 敏感数据日志 敏感数据不能记录在WARN和INFO级别日志。用*星号代替敏感数据的打印。如果对排查故障没有任何帮助,开发人员一定要谨慎打印。 日志配置日志模板 格式 配置 例子值 目的 ${TIMESTAMP} %clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} 2021-08-15 15:58:51.772 执行时间戳 ${LOG_LEVEL_PATTERN} 
%clr(${LOG_LEVEL_PATTERN:-%5p})
日志格式和颜色渲染 --> <!-- 彩色日志依赖的渲染类 --> <conversionRule conversionWord=\"clr\" converterClass=\"org.springframework.boot.logging.logback.ColorConverter\" /> <conversionRule conversionWord=\"wex\" converterClass=\"org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter\" /> <conversionRule conversionWord=\"wEx\" converterClass=\"org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter\" /> <property name=\"TraceId\" value=\"%clr(tId=%mdc{Request-Trace-Id})\" /> <!-- 彩色日志格式 --> <property name=\"CONSOLE_LOG_PATTERN\" value=\"${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} ${TraceId} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx} }\"/> <!--1. 输出到控制台--> <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\"> <!--此日志appender是为开发使用,只配置最底级别,控制台输出的日志级别是大于或等于此级别的日志信息--> <filter class=\"ch.qos.logback.classic.filter.ThresholdFilter\"> <level>debug</level> </filter> <encoder> <Pattern>${CONSOLE_LOG_PATTERN}</Pattern> <!-- 设置字符集 --> <charset>UTF-8</charset> </encoder> </appender> <!--2. 
输出到文档--> <!-- 2.1 level为 DEBUG 日志,时间滚动输出 --> <appender name=\"DEBUG_FILE\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\"> <!-- 正在记录的日志文档的路径及文档名 --> <file>${log.path}/web_debug.log</file> <!--日志文档输出格式--> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern> <charset>UTF-8</charset> <!-- 设置字符集 --> </encoder> <!-- 日志记录器的滚动策略,按日期,按大小记录 --> <rollingPolicy class=\"ch.qos.logback.core.rolling.TimeBasedRollingPolicy\"> <!-- 日志归档 --> <fileNamePattern>${log.path}/web-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern> <timeBasedFileNamingAndTriggeringPolicy class=\"ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP\"> <maxFileSize>100MB</maxFileSize> </timeBasedFileNamingAndTriggeringPolicy> <!--日志文档保留天数--> <maxHistory>15</maxHistory> </rollingPolicy> <!-- 此日志文档只记录debug级别的 --> <filter class=\"ch.qos.logback.classic.filter.LevelFilter\"> <level>debug</level> <onMatch>ACCEPT</onMatch> <onMismatch>DENY</onMismatch> </filter> </appender> <!-- 2.2 level为 INFO 日志,时间滚动输出 --> <appender name=\"INFO_FILE\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\"> <!-- 正在记录的日志文档的路径及文档名 --> <file>${log.path}/web_info.log</file> <!--日志文档输出格式--> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern> <charset>UTF-8</charset> </encoder> <!-- 日志记录器的滚动策略,按日期,按大小记录 --> <rollingPolicy class=\"ch.qos.logback.core.rolling.TimeBasedRollingPolicy\"> <!-- 每天日志归档路径以及格式 --> <fileNamePattern>${log.path}/web-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern> <timeBasedFileNamingAndTriggeringPolicy class=\"ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP\"> <maxFileSize>100MB</maxFileSize> </timeBasedFileNamingAndTriggeringPolicy> <!--日志文档保留天数--> <maxHistory>15</maxHistory> </rollingPolicy> <!-- 此日志文档只记录info级别的 --> <filter class=\"ch.qos.logback.classic.filter.LevelFilter\"> <level>info</level> <onMatch>ACCEPT</onMatch> <onMismatch>DENY</onMismatch> </filter> </appender> <!-- 2.3 level为 WARN 日志,时间滚动输出 --> <appender 
name=\"WARN_FILE\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\"> <!-- 正在记录的日志文档的路径及文档名 --> <file>${log.path}/web_warn.log</file> <!--日志文档输出格式--> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern> <charset>UTF-8</charset> <!-- 此处设置字符集 --> </encoder> <!-- 日志记录器的滚动策略,按日期,按大小记录 --> <rollingPolicy class=\"ch.qos.logback.core.rolling.TimeBasedRollingPolicy\"> <fileNamePattern>${log.path}/web-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern> <timeBasedFileNamingAndTriggeringPolicy class=\"ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP\"> <maxFileSize>100MB</maxFileSize> </timeBasedFileNamingAndTriggeringPolicy> <!--日志文档保留天数--> <maxHistory>15</maxHistory> </rollingPolicy> <!-- 此日志文档只记录warn级别的 --> <filter class=\"ch.qos.logback.classic.filter.LevelFilter\"> <level>warn</level> <onMatch>ACCEPT</onMatch> <onMismatch>DENY</onMismatch> </filter> </appender> <!-- 2.4 level为 ERROR 日志,时间滚动输出 --> <appender name=\"ERROR_FILE\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\"> <!-- 正在记录的日志文档的路径及文档名 --> <file>${log.path}/web_error.log</file> <!--日志文档输出格式--> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern> <charset>UTF-8</charset> <!-- 此处设置字符集 --> </encoder> <!-- 日志记录器的滚动策略,按日期,按大小记录 --> <rollingPolicy class=\"ch.qos.logback.core.rolling.TimeBasedRollingPolicy\"> <fileNamePattern>${log.path}/web-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern> <timeBasedFileNamingAndTriggeringPolicy class=\"ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP\"> <maxFileSize>100MB</maxFileSize> </timeBasedFileNamingAndTriggeringPolicy> <!--日志文档保留天数--> <maxHistory>15</maxHistory> </rollingPolicy> <!-- 此日志文档只记录ERROR级别的 --> <filter class=\"ch.qos.logback.classic.filter.LevelFilter\"> <level>ERROR</level> <onMatch>ACCEPT</onMatch> <onMismatch>DENY</onMismatch> </filter> </appender> <!-- <logger>用来设置某一个包或者具体的某一个类的日志打印级别、 以及指定<appender>。<logger>仅有一个name属性, 一个可选的level和一个可选的addtivity属性。 
name:用来指定受此logger约束的某一个包或者具体的某一个类。 level:用来设置打印级别,大小写无关:TRACE, DEBUG, INFO, WARN, ERROR, ALL 和 OFF, 还有一个特俗值INHERITED或者同义词NULL,代表强制执行上级的级别。 如果未设置此属性,那么当前logger将会继承上级的级别。 addtivity:是否向上级logger传递打印信息。默认是true。 <logger name=\"org.springframework.web\" level=\"info\"/> <logger name=\"org.springframework.scheduling.annotation.ScheduledAnnotationBeanPostProcessor\" level=\"INFO\"/> --> <!-- 使用mybatis的时候,sql语句是debug下才会打印,而这里我们只配置了info,所以想要查看sql语句的话,有以下两种操作: 第一种把<root level=\"info\">改成<root level=\"DEBUG\">这样就会打印sql,不过这样日志那边会出现很多其他消息 第二种就是单独给dao下目录配置debug模式,代码如下,这样配置sql语句会打印,其他还是正常info级别: 【logging.level.org.mybatis=debug logging.level.dao=debug】 --> <!-- root节点是必选节点,用来指定最基础的日志输出级别,只有一个level属性 level:用来设置打印级别,大小写无关:TRACE, DEBUG, INFO, WARN, ERROR, ALL 和 OFF, 不能设置为INHERITED或者同义词NULL。默认是DEBUG 可以包含零个或多个元素,标识这个appender将会添加到这个logger。 --> <!-- 4. 最终的策略 --> <!-- 4.1 开发环境:打印控制台--> <springProfile name=\"dev\"> <logger name=\"com.sdcm.pmp\" level=\"debug\"/> </springProfile> <root level=\"info\"> <appender-ref ref=\"CONSOLE\" /> <appender-ref ref=\"DEBUG_FILE\" /> <appender-ref ref=\"INFO_FILE\" /> <appender-ref ref=\"WARN_FILE\" /> <appender-ref ref=\"ERROR_FILE\" /> </root> <!-- 4.2 生产环境:输出到文档 <springProfile name=\"pro\"> <root level=\"info\"> <appender-ref ref=\"CONSOLE\" /> <appender-ref ref=\"DEBUG_FILE\" /> <appender-ref ref=\"INFO_FILE\" /> <appender-ref ref=\"ERROR_FILE\" /> <appender-ref ref=\"WARN_FILE\" /> </root> </springProfile> --></configuration> 参考资料 logback-spring.xml配置 - mahoshojo - 博客园 配套代码","tags":[{"name":"架构","slug":"架构","permalink":"https://caochikai.github.io/tags/%E6%9E%B6%E6%9E%84/"}]},{"title":"SpringBoot异常处理体系","date":"2021-08-14T14:56:00.000Z","path":"2021/08/14/SpringBoot异常处理体系/","text":"SpringBoot异常处理体系 在SpringMVC中通过组合使用注解@ControllerAdvice、@ModelAttribute和@InitBinder(“b),配合 HandlerExceptionResolver作为全局性的定制。包括 Handler 映射、数据绑定以及目标方法执行时发生的异常。 @ExceptionHandler:统一的异常处理控制器方法 @ModelAttribute:共用方法添加渲染视图的数据模型属性 @InitBinder:共用方法初始化控制器方法调用使用的数据绑定器 异常默认规则 
默认情况下,Spring Boot提供/error处理所有错误的映射 对于机器客户端,它将生成JSON响应,其中包含错误,HTTP状态和异常消息的详细信息。对于浏览器客户端,响应一个“ whitelabel”错误视图,以HTML格式呈现相同的数据 要完全替换默认行为,可以实现 ErrorController 并注册该类型的Bean定义,或添加ErrorAttributes类型的组件以使用现有机制但替换其内容,或添加(templates/static)/error/(4xx|5xx)页面会被自动解析;我们内部就自定义实现了ErrorController 浏览器客户端:Content-Type: text/html;charset=UTF-8 123456789101112131415161718192021GET http://localhost:8080/api/xxHTTP/1.1 404 Vary: OriginVary: Access-Control-Request-MethodVary: Access-Control-Request-HeadersContent-Type: text/html;charset=UTF-8Content-Language: zh-CNContent-Length: 286Date: Sat, 14 Aug 2021 09:41:34 GMTKeep-Alive: timeout=60Connection: keep-alive<html><body><h1>Whitelabel Error Page</h1><p>This application has no explicit mapping for /error, so you are seeing this as a fallback.</p><div id='created'>Sat Aug 14 17:41:34 CST 2021</div><div>There was an unexpected error (type=Not Found, status=404).</div><div></div></body></html> 机器客户端:Content-Type: application/json 12345678910111213141516171819GET http://localhost:8080/api/xxHTTP/1.1 404 Vary: OriginVary: Access-Control-Request-MethodVary: Access-Control-Request-HeadersContent-Type: application/jsonTransfer-Encoding: chunkedDate: Sat, 14 Aug 2021 09:42:44 GMTKeep-Alive: timeout=60Connection: keep-alive{ \"timestamp\": \"2021-08-14T09:42:44.755+00:00\", \"status\": 404, \"error\": \"Not Found\", \"message\": \"\", \"path\": \"/api/xx\"} 异常处理自动配置原理 ErrorMvcAutoConfiguration 自动配置异常处理规则 DefaultErrorAttributes定义错误页面中可以包含哪些数据。 BasicErrorController 定义JSON(Content-Type: application/json)和白页Whitelabel Error Page(Content-Type: text/html)两种适配响应。 123456789101112131415161718192021222324**public class DefaultErrorAttributes implements ErrorAttributes**, **HandlerExceptionResolver{...**包含哪些数据。 **@Override public Map<String, Object> getErrorAttributes(WebRequest webRequest, ErrorAttributeOptions options) { Map<String, Object> errorAttributes = getErrorAttributes(webRequest, options.isIncluded(Include.STACK_TRACE)); if 
(Boolean.TRUE.equals(this.includeException)) { options = options.including(Include.EXCEPTION); } if (!options.isIncluded(Include.EXCEPTION)) { errorAttributes.remove(\"exception\"); } if (!options.isIncluded(Include.STACK_TRACE)) { errorAttributes.remove(\"trace\"); } if (!options.isIncluded(Include.MESSAGE) && errorAttributes.get(\"message\") != null) { errorAttributes.put(\"message\", \"\"); } if (!options.isIncluded(Include.BINDING_ERRORS)) { errorAttributes.remove(\"errors\"); } return errorAttributes; }...}** 123456789101112131415161718192021222324252627282930313233343536@Controller@RequestMapping(\"${server.error.path:${error.path:/error}}\")//server.error.path配置可自定义error路径public class BasicErrorController extends AbstractErrorController {...定义白页(**Content-Type: text/html**)适配响应。 @RequestMapping(produces = MediaType.TEXT_HTML_VALUE) public ModelAndView errorHtml(HttpServletRequest request, HttpServletResponse response) { HttpStatus status = getStatus(request); Map<String, Object> model = Collections .unmodifiableMap(getErrorAttributes(request, getErrorAttributeOptions(request, MediaType.TEXT_HTML))); response.setStatus(status.value()); ModelAndView modelAndView = resolveErrorView(request, response, status, model); return (modelAndView != null) ? 
modelAndView : new ModelAndView(\"error\", model); }protected ModelAndView resolveErrorView(HttpServletRequest request, HttpServletResponse response, HttpStatus status, Map<String, Object> model) {//org.springframework.boot.autoconfigure.web.servlet.error.DefaultErrorViewResolver是默认error视图解析器 for (ErrorViewResolver resolver : this.errorViewResolvers) { ModelAndView modelAndView = resolver.resolveErrorView(request, status, model); if (modelAndView != null) { return modelAndView; } } return null; }//json(**Content-Type: application/json**)适配响应。 @RequestMapping public ResponseEntity<Map<String, Object>> error(HttpServletRequest request) { HttpStatus status = getStatus(request); if (status == HttpStatus.NO_CONTENT) { return new ResponseEntity<>(status); } Map<String, Object> body = getErrorAttributes(request, getErrorAttributeOptions(request, MediaType.ALL)); return new ResponseEntity<>(body, status); }...} org.springframework.boot.autoconfigure.web.servlet.error.ErrorMvcAutoConfiguration.StaticView:处理白页error内容来源 123456789101112131415161718192021222324252627282930313233343536###Simple View implementation that writes a default HTML error page.private static class StaticView implements View { private static final MediaType TEXT_HTML_UTF8 = new MediaType(\"text\", \"html\", StandardCharsets.UTF_8);...@Override public void render(Map<String, ?> model, HttpServletRequest request, HttpServletResponse response) throws Exception { if (response.isCommitted()) { String message = getMessage(model); logger.error(message); return; } response.setContentType(TEXT_HTML_UTF8.toString()); StringBuilder builder = new StringBuilder(); Object timestamp = model.get(\"timestamp\"); Object message = model.get(\"message\"); Object trace = model.get(\"trace\"); if (response.getContentType() == null) { response.setContentType(getContentType()); } builder.append(\"<html><body><h1>Whitelabel Error Page</h1>\").append( \"<p>This application has no explicit mapping for /error, so you are seeing this 
as a fallback.</p>\") .append(\"<div id='created'>\").append(timestamp).append(\"</div>\") .append(\"<div>There was an unexpected error (type=\").append(htmlEscape(model.get(\"error\"))) .append(\", status=\").append(htmlEscape(model.get(\"status\"))).append(\").</div>\"); if (message != null) { builder.append(\"<div>\").append(htmlEscape(message)).append(\"</div>\"); } if (trace != null) { builder.append(\"<div style='white-space:pre-wrap;'>\").append(htmlEscape(trace)).append(\"</div>\"); } builder.append(\"</body></html>\"); response.getWriter().append(builder.toString()); }...} org.springframework.boot.autoconfigure.web.servlet.error.DefaultErrorViewResolver 123456789101112131415161718192021222324252627282930313233343536373839404142//使用status code和status series在'/error'下搜索模板和静态资产。//例如, HTTP 404将搜索(按特定顺序)://'/<templates>/error/404.<ext>'//'/<static>/error/404.html'//'/<templates>/error/4xx.<ext>'//'/<static>/error/4xx.html'public class DefaultErrorViewResolver implements ErrorViewResolver, Ordered {... 
@Override public ModelAndView resolveErrorView(HttpServletRequest request, HttpStatus status, Map<String, Object> model) { ModelAndView modelAndView = resolve(String.valueOf(status.value()), model); if (modelAndView == null && SERIES_VIEWS.containsKey(status.series())) {//处理不同状态码的视图 modelAndView = resolve(SERIES_VIEWS.get(status.series()), model); } return modelAndView; } private ModelAndView resolve(String viewName, Map<String, Object> model) {//**拼接不同错误HTTP状态码的视图名称,比如error/500.html** String errorViewName = \"error/\" + viewName; TemplateAvailabilityProvider provider = this.templateAvailabilityProviders.getProvider(errorViewName, this.applicationContext); if (provider != null) { return new ModelAndView(errorViewName, model); } return resolveResource(errorViewName, model); } private ModelAndView resolveResource(String viewName, Map<String, Object> model) { for (String location : this.resources.getStaticLocations()) { try {//**获取静态资源返回** Resource resource = this.applicationContext.getResource(location); resource = resource.createRelative(viewName + \".html\"); if (resource.exists()) { return new ModelAndView(new HtmlResourceView(resource), model); } } catch (Exception ex) { } } 异常处理步骤 执行目标方法有任何异常都会被catch Exception和Throwable,并且用NestedServletException dispatchException包装异常 进入视图解析的processDispatchResult方法 执行异常处理#processHandlerException方法,找不到可处理的异常处理器,将会继续抛出异常,最终触发Servlet携带异常信息转发请求/error,进入上面提到的BasicErrorController 1234567891011121314doInvoke:192, InvocableHandlerMethod (org.springframework.web.method.support)invokeForRequest:141, InvocableHandlerMethod (org.springframework.web.method.support)invokeAndHandle:106, ServletInvocableHandlerMethod (org.springframework.web.servlet.mvc.method.annotation)invokeHandlerMethod:894, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)handleInternal:808, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)handle:87, AbstractHandlerMethodAdapter 
(org.springframework.web.servlet.mvc.method)doDispatch:1060, DispatcherServlet (org.springframework.web.servlet)doService:962, DispatcherServlet (org.springframework.web.servlet)processRequest:1006, FrameworkServlet (org.springframework.web.servlet)doGet:898, FrameworkServlet (org.springframework.web.servlet)service:626, HttpServlet (javax.servlet.http)service:883, FrameworkServlet (org.springframework.web.servlet)service:733, HttpServlet (javax.servlet.http)... DispatcherServlet1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950public class DispatcherServlet extends FrameworkServlet {...protected void doDispatch(HttpServletRequest request, HttpServletResponse response) throws Exception { Exception dispatchException = null; try {... // Determine handler for the current request. mappedHandler = getHandler(processedRequest); if (mappedHandler == null) { noHandlerFound(processedRequest, response); return; } // Determine handler adapter for the current request. HandlerAdapter ha = getHandlerAdapter(mappedHandler.getHandler()); ... //实际调用Controller mv = ha.handle(processedRequest, response, mappedHandler.getHandler()); if (asyncManager.isConcurrentHandlingStarted()) { return; } applyDefaultViewName(processedRequest, mv); mappedHandler.applyPostHandle(processedRequest, response, mv); }//目标方法运行期间有任何异常都会被catch catch (Exception ex) { dispatchException = ex; } catch (Throwable err) { // As of 4.3, we're processing Errors thrown from handler methods as well, // making them available for @ExceptionHandler methods and other scenarios. 
dispatchException = new NestedServletException(\"Handler dispatch failed\", err); } processDispatchResult(processedRequest, response, mappedHandler, mv, dispatchException); } catch (Exception ex) { triggerAfterCompletion(processedRequest, response, mappedHandler, ex); } catch (Throwable err) { triggerAfterCompletion(processedRequest, response, mappedHandler, new NestedServletException(\"Handler processing failed\", err)); } finally { ... } ...} InvocableHandlerMethod1234567891011121314151617181920212223242526272829303132333435public class InvocableHandlerMethod extends HandlerMethod {...@Nullable protected Object doInvoke(Object... args) throws Exception { Method method = getBridgedMethod(); ReflectionUtils.makeAccessible(method); try {...真正调用的地方 return method.invoke(getBean(), args); } catch (IllegalArgumentException ex) { assertTargetBean(method, getBean(), args); String text = (ex.getMessage() != null ? ex.getMessage() : \"Illegal argument\"); throw new IllegalStateException(formatInvokeError(text, args), ex); }//**往上抛出异常给DispatcherServlet** catch (InvocationTargetException ex) { // Unwrap for HandlerExceptionResolvers ... 
Throwable targetException = ex.getTargetException(); if (targetException instanceof RuntimeException) { throw (RuntimeException) targetException; } else if (targetException instanceof Error) { throw (Error) targetException; } else if (targetException instanceof Exception) { throw (Exception) targetException; } else { throw new IllegalStateException(formatInvokeError(\"Invocation failure\", args), targetException); } } }...} org.springframework.web.servlet.DispatcherServlet#processHandlerException 默认注册了两种异常处理器,第一个执行是默认并且上面提到的DefaultErrorAttributes,把异常信息保存到request域,并且返回null;第二个是组合异常处理器集合下ExceptionHandlerExceptionResolver就是处理映射@ExceptionHandler注解方法。@Order调整遍历异常解析器优先级。 12345678910111213141516171819202122232425262728293031323334353637383940414243@Nullable protected ModelAndView processHandlerException(HttpServletRequest request, HttpServletResponse response, @Nullable Object handler, Exception ex) throws Exception { //成功和错误响应可能使用不同的内容类型,所以先remove request.removeAttribute(HandlerMapping.PRODUCIBLE_MEDIA_TYPES_ATTRIBUTE); // 检查注册的 HandlerExceptionResolvers... ModelAndView exMv = null; if (this.handlerExceptionResolvers != null) {、//遍历所有的 handlerExceptionResolvers集合,看谁能处理当前异常 for (HandlerExceptionResolver resolver : this.handlerExceptionResolvers) { exMv = resolver.resolveException(request, response, handler, ex); if (exMv != null) { break; } } } if (exMv != null) { if (exMv.isEmpty()) {//如果上面的ModelAndView里view和model属性都是空,则返回null request.setAttribute(EXCEPTION_ATTRIBUTE, ex); return null; } // 对于一个简单的错误模型,我们可能仍然需要视图名称转换...... 
if (!exMv.hasView()) { String defaultViewName = getDefaultViewName(request); if (defaultViewName != null) { exMv.setViewName(defaultViewName); } } if (logger.isTraceEnabled()) { logger.trace(\"Using resolved error view: \" + exMv, ex); } else if (logger.isDebugEnabled()) { logger.debug(\"Using resolved error view: \" + exMv); } WebUtils.exposeErrorRequestAttributes(request, ex, getServletName()); return exMv; } throw ex; } @ControllerAdvice注册ApplicationContext123456789101112131415/** * 通过策略模式构建ApplicationContext属性 */ protected void initStrategies(ApplicationContext context) { initMultipartResolver(context); initLocaleResolver(context); initThemeResolver(context); initHandlerMappings(context); initHandlerAdapters(context);//构建以及注册HandlerExceptionResolvers initHandlerExceptionResolvers(context); initRequestToViewNameTranslator(context); initViewResolvers(context); initFlashMapManager(context); } ExceptionResolver继承体系: ExceptionResolver继承体系 参考资料 springboot资料 配套代码","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"Context-length跨服务超时","date":"2021-08-13T15:13:00.000Z","path":"2021/08/13/Context-length跨服务超时/","text":"Context-length跨服务超时一、背景 今天被拉进远程会议,博主是热心提供疑难bug解决的顾问,情况是跨语言接口客户端调用超时并且服务端打印内部错误500,reactor-netty封装的DefaultWebClient调用Python flash接口,DefaultWebClient无论加大多少时间都会超时,Python接口只打印报错500,打印无提供更加详细信息。前端发送POST请求调用从聚合服务Process API(PAPI)中获取数据,PAPI通过DefaultWebClient发送GET请求到Python编写接口SAPI,因为调用SAPI时须要携带header(Context-length:64),出问题地方就是Context-length原因,给team解释一下该header影响。其实本来公共封装DefaultWebClient库GET方法过滤了Context-length(驼峰命名),已经考虑到该现象,但是另一个公共库封装的RequestContext类getHeader方法全部小写化context-length(不符合http标准),这就闹了很大乌龙了,反馈给公共库贡献者修复该issue。 Content-Length: Content-Length 是一个实体消息首部,用来指明发送给接收方的消息主体的大小,即用十进制数字表示的八位字节的数目。Content-Length指示出报文中实体主体的字节大小,它包含了所有的编码内容; 比如, 对文本文件进行了gzip压缩的话,Content-Length指的就是压缩后的大小而不是原始大小。IETF(The Internet Engineering Task Force)发布了绝大多数国际互联网技术标准,规定HTTP发送方不得在任何消息中发送 Content-Length, 
但是在微服务之间调用很容易造成误传。无论是SpringBoot内嵌的tomcat还是Python flash都实现了该http标准,Python暴露底层库error信息较少,只有个500;Tomcat NioEndpoint$NioSocketWrapper根据Content-Length读取Socket缓冲区,直到超时发现,缓冲区字节长度为-1,SpringMvc报错如下: Content-Length与实际消息长度不一致, 程序会发生比较奇怪的异常: 过长则会导致无响应直到超时。 过短则会截断请求被截断, 而且下一个请求解析出现错乱(上一次请求将会将入下一次请求)。 1234###实际测试命令curl --location --request GET 'http://localhost:8080/api/version?version=1.0' \\--header 'Content-Type: application/json' \\--header 'Content-Length: 64' 123456789101112131415161718192021222324252627282930313233342021-08-13 22:43:12.759 DEBUG 13692 --- [nio-8080-exec-1] o.s.web.servlet.DispatcherServlet : GET \"/api/version?version=1.0\", parameters={masked}2021-08-13 22:43:12.772 DEBUG 13692 --- [nio-8080-exec-1] o.a.coyote.http11.Http11InputBuffer : Before fill(): parsingHeader: [false], parsingRequestLine: [false], parsingRequestLinePhase: [0], parsingRequestLineStart: [0], byteBuffer.position(): [301], byteBuffer.limit(): [301], end: [301]2021-08-13 22:43:12.772 DEBUG 13692 --- [nio-8080-exec-1] o.a.tomcat.util.net.SocketWrapperBase : Socket: [org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper@4166e5bd:org.apache.tomcat.util.net.NioChannel@2a06c6f5:java.nio.channels.SocketChannel[connected local=/0:0:0:0:0:0:0:1:8080 remote=/0:0:0:0:0:0:0:1:52338]], Read from buffer: [0]2021-08-13 22:43:15.356 DEBUG 13692 --- [(4)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(4)-192.168.3.2: (port 52325) connection closed2021-08-13 22:43:15.356 DEBUG 13692 --- [(4)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(4)-192.168.3.2: close connection2021-08-13 22:43:15.356 DEBUG 13692 --- [(3)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(3)-192.168.3.2: (port 52325) connection closed2021-08-13 22:43:15.356 DEBUG 13692 --- [(3)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(3)-192.168.3.2: close connection2021-08-13 22:43:15.478 DEBUG 13692 --- [(5)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(5)-192.168.3.2: accepted socket from 
[192.168.3.2:52339]2021-08-13 22:43:15.478 DEBUG 13692 --- [(5)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(5)-192.168.3.2: (port 52325) op = 802021-08-13 22:43:37.896 DEBUG 13692 --- [(2)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(2)-192.168.3.2: (port 52325) connection closed2021-08-13 22:43:37.896 DEBUG 13692 --- [(2)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(2)-192.168.3.2: close connection2021-08-13 22:43:38.563 DEBUG 13692 --- [(5)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(5)-192.168.3.2: (port 52325) connection closed2021-08-13 22:43:38.563 DEBUG 13692 --- [(5)-192.168.3.2] sun.rmi.transport.tcp : RMI TCP Connection(5)-192.168.3.2: close connection2021-08-13 22:44:08.274 DEBUG 13692 --- [alina-utility-1] org.apache.catalina.session.ManagerBase : Start expire sessions StandardManager at 1628865848274 sessioncount 02021-08-13 22:44:08.274 DEBUG 13692 --- [alina-utility-1] org.apache.catalina.session.ManagerBase : End expire sessions StandardManager processingTime 0 expired sessions: 02021-08-13 22:44:12.776 DEBUG 13692 --- [nio-8080-exec-1] o.s.web.method.HandlerMethod : Could not resolve parameter [0] in org.springframework.http.ResponseEntity<com.example.demo.Greeting> com.example.demo.TestController.test(com.example.demo.raml.RequestContext<java.lang.Void>): I/O error while reading input message; nested exception is org.apache.catalina.connector.ClientAbortException: java.net.SocketTimeoutException2021-08-13 22:44:12.778 WARN 13692 --- [nio-8080-exec-1] .w.s.m.s.DefaultHandlerExceptionResolver : Resolved [org.springframework.http.converter.HttpMessageNotReadableException: I/O error while reading input message; nested exception is org.apache.catalina.connector.ClientAbortException: java.net.SocketTimeoutException]2021-08-13 22:57:36.398 DEBUG 13692 --- [nio-8080-exec-5] o.apache.coyote.http11.Http11Processor : Error parsing HTTP request headerjava.io.EOFException: null at 
org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper.fillReadBuffer(NioEndpoint.java:1345) ~[tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper.read(NioEndpoint.java:1255) ~[tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.coyote.http11.Http11InputBuffer.fill(Http11InputBuffer.java:794) ~[tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.coyote.http11.Http11InputBuffer.parseRequestLine(Http11InputBuffer.java:359) ~[tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.coyote.http11.Http11Processor.service(Http11Processor.java:261) ~[tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.coyote.AbstractProcessorLight.process(AbstractProcessorLight.java:65) [tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.coyote.AbstractProtocol$ConnectionHandler.process(AbstractProtocol.java:893) [tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1707) [tomcat-embed-core-9.0.45.jar:9.0.45] at org.apache.tomcat.util.net.SocketProcessorBase.run(SocketProcessorBase.java:49) [tomcat-embed-core-9.0.45.jar:9.0.45] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [na:1.8.0_231] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [na:1.8.0_231] at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61) [tomcat-embed-core-9.0.45.jar:9.0.45] at java.lang.Thread.run(Thread.java:748) [na:1.8.0_231] 堆栈源码细究1234567891011121314151617181920212223242526272829303132private int fillReadBuffer(boolean block, ByteBuffer to) throws IOException { int nRead; NioChannel socket = getSocket(); if (socket == NioChannel.CLOSED_NIO_CHANNEL) { throw new ClosedChannelException(); } if (block) { Selector selector = null; try { selector = pool.get(); } catch (IOException x) { // Ignore } try { nRead = pool.read(to, socket, selector, getReadTimeout()); } finally { if (selector != null) { pool.put(selector); } } } 
else {//从此通道读取字节序列到给定缓冲区。//参数:to – 要传输字节的缓冲区//返回:读取的字节数,可能为零,如果通道已到达流结束,则为 -1: nRead = socket.read(to); if (nRead == -1) {//满足nRead == -1,抛出该异常! throw new EOFException(); } } return nRead;} 提供Feign版本(内部代码无法公布,提供网络版本)12345678910111213141516171819202122232425262728293031323334353637383940414243import feign.RequestInterceptor;import feign.RequestTemplate;import org.springframework.context.annotation.Configuration;import org.springframework.http.HttpHeaders;import org.springframework.web.context.request.RequestContextHolder;import org.springframework.web.context.request.ServletRequestAttributes;import javax.servlet.http.HttpServletRequest;import java.util.Enumeration;import java.util.LinkedHashMap;import java.util.Map;@Configurationpublic class FeginInterceptor implements RequestInterceptor { @Override public void apply(RequestTemplate requestTemplate) { try { Map<String,String> headers = getHeaders(); for(String headerName : headers.keySet()){ requestTemplate.header(headerName, headers.get(headerName)); } }catch (Exception e){ e.printStackTrace(); } } private Map<String, String> getHeaders(){ HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest(); Map<String, String> map = new LinkedHashMap<>(); Enumeration<String> enumeration = request.getHeaderNames(); while (enumeration.hasMoreElements()) { String key = enumeration.nextElement(); String value = request.getHeader(key);//改动优化使用标准HttpHeaders if (HttpHeaders.CONTENT_LENGTH.equals(key)) { continue; } map.put(key, value); } return map; }} 参考资料 MDN Content-Length Http协议标准 springCloud 使用feign复制请求头调用其余服务 content-length不一致致使调用失败 用了这么久HTTP, 你是否了解Content-Length和Transfer-Encoding ? 
配套代码 字节的","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"Controller自定义参数类型","date":"2021-08-11T14:15:00.000Z","path":"2021/08/11/Controller自定义参数类型/","text":"Controller自定义参数类型一、需求 RAML设计的接口如何确定每次请求都符合设计,可以通过自定义Controller参数来做解析和校验请求,首先自定义RequestContext,让Controller支持该类型参数,实现类似HttpServletRequest、HttpSession自动就赋值的效果。 HandlerMethodArgumentResolver HandlerMethodArgumentResolver组件的作用主要是用来做参数解析及校验的,包含2个方法 123456/** 是否是支持的类型 **/boolean supportsParameter(MethodParameter parameter);/** 具体解析参数方法 **/Object resolveArgument(MethodParameter parameter, ModelAndViewContainer mavContainer, NativeWebRequest webRequest, WebDataBinderFactory binderFactory) throws Exception; RequestContext12345678910111213141516171819202122232425package com.example.demo.raml;import org.springframework.http.HttpMethod;import org.springframework.util.MultiValueMap;import java.util.Optional;public interface RequestContext<T> { MultiValueMap<String, String> getHeaders(); Optional<String> getParamter(String key); Optional<String> getUrlPath(); Optional<T> getBody(); Class<?> getBodyClass(); HttpMethod getMethod(); MultiValueMap<String, String> getQueryParameterMap(); MultiValueMap<String, String> getUrlParameterMap();} 继承AbstractMessageConverterMethodArgumentResolver AbstractMessageConverterMethodArgumentResolver实现了HandlerMethodArgumentResolver,通过使用 HttpMessageConverters 从请求正文中读取来解析方法参数值的基类,利用HttpMessageConverter将输入流转换成对应的参数。 RequestContextProcessor 参数解析器 通过继承AbstractMessageConverterMethodArgumentResolver,每次请求到Controller有RequestContext类型参数,supportsParameter方法就会返回为true,从而触发resolveArgument方法构建RequestContext。 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384package com.example.demo.raml;import com.fasterxml.jackson.core.JsonProcessingException;import com.fasterxml.jackson.databind.ObjectMapper;import 
org.springframework.beans.factory.annotation.Autowired;import org.springframework.core.MethodParameter;import org.springframework.http.HttpMethod;import org.springframework.http.converter.HttpMessageConverter;import org.springframework.http.server.ServletServerHttpRequest;import org.springframework.util.Assert;import org.springframework.util.MultiValueMap;import org.springframework.web.bind.support.WebDataBinderFactory;import org.springframework.web.context.request.NativeWebRequest;import org.springframework.web.context.request.RequestAttributes;import org.springframework.web.method.support.ModelAndViewContainer;import org.springframework.web.servlet.HandlerMapping;import org.springframework.web.servlet.mvc.method.annotation.AbstractMessageConverterMethodArgumentResolver;import java.lang.reflect.ParameterizedType;import java.lang.reflect.Type;import java.util.*;public class RequestContextProcessor extends AbstractMessageConverterMethodArgumentResolver { @Autowired private ObjectMapper objectMapper; public RequestContextProcessor(List<HttpMessageConverter<?>> converters) { super(converters); } @Override public boolean supportsParameter(MethodParameter methodParameter) { return HttpRequestContext.class == methodParameter.getParameterType() | RequestContext.class == methodParameter.getParameterType(); } @Override public Object resolveArgument(MethodParameter methodParameter, ModelAndViewContainer modelAndViewContainer, NativeWebRequest nativeWebRequest, WebDataBinderFactory webDataBinderFactory) throws Exception { ServletServerHttpRequest inputMessage = createInputMessage(nativeWebRequest); HttpMethod httpMethod = inputMessage.getMethod(); String endpointPathPattern = (String) nativeWebRequest.getAttribute(HandlerMapping.BEST_MATCHING_PATTERN_ATTRIBUTE, RequestAttributes.SCOPE_REQUEST);//获取headers,queryParameterMap ,urlParameterHashMap,body MultiValueMap<String, String> headers = inputMessage.getHeaders(); Map<String, String[]> queryParameterMap = 
inputMessage.getServletRequest().getParameterMap(); MultiValueMap<String, String> parameterValueMap = MapUtil.convertArrayMapToParamMap(queryParameterMap); Map<String, String> urlParameterHashMap = (Map<String, String>) nativeWebRequest.getAttribute(HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE, RequestAttributes.SCOPE_REQUEST); MultiValueMap<String, String> urlParameterMap = MapUtil.convertHashMapToParamMap(urlParameterHashMap);//继承AbstractMessageConverterMethodArgumentResolver好处就是能方便拿到body String body = (String) readWithMessageConverters(nativeWebRequest, methodParameter, String.class); Object responseBody = null; Class<?> bodyClass = getClass(methodParameter); if (HttpMethod.POST == httpMethod || HttpMethod.PUT == httpMethod) { if (bodyClass.isAssignableFrom(String.class)) { responseBody = body; } else { try { responseBody = objectMapper.readValue(body, bodyClass); } catch (JsonProcessingException e) { e.printStackTrace(); } } } RequestContext<?> httpRequestContext = new HttpRequestContext(headers, parameterValueMap, urlParameterMap, responseBody, bodyClass, endpointPathPattern, httpMethod); return httpRequestContext; } public static Class<?> getClass(MethodParameter methodParameter) { Assert.isAssignable(RequestContext.class, methodParameter.getParameterType()); Class<?> bodyClass = null; Type parameterType = methodParameter.getGenericParameterType(); if (parameterType instanceof ParameterizedType) { ParameterizedType parameterizedType = (ParameterizedType) parameterType; if (parameterizedType.getActualTypeArguments().length == 1) { return (Class<?>) parameterizedType.getActualTypeArguments()[0]; } } else if (parameterType instanceof Class) { bodyClass = Object.class; } return Optional.ofNullable(bodyClass).orElseThrow(() -> new IllegalArgumentException(\"can not find parameter type\")); }} Controller123456789101112131415161718192021package com.example.demo;import com.example.demo.raml.RequestContext;import org.springframework.http.ResponseEntity;import 
org.springframework.web.bind.annotation.*;@CrossOrigin@RestController@RequestMapping(\"/api\")public class TestController { @GetMapping(\"/version\") ResponseEntity<Greeting> test(RequestContext<Void> requestContext) { String version = requestContext.getParamter(\"version\").orElseThrow(IllegalArgumentException::new); Greeting greeting = new Greeting(); greeting.setFirstName(\"Charles\"); greeting.setLastName(\"Cao\"); greeting.setVersion(version); return ResponseEntity.ok().body(greeting); }} 如何注册springmvc参数解析器1234567891011121314151617181920212223242526272829303132333435package com.example.demo;import com.example.demo.raml.RequestContextProcessor;import org.slf4j.Logger;import org.slf4j.LoggerFactory;import org.springframework.beans.factory.annotation.Autowired;import org.springframework.context.annotation.Bean;import org.springframework.context.annotation.Configuration;import org.springframework.http.converter.HttpMessageConverter;import org.springframework.web.method.support.HandlerMethodArgumentResolver;import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;import java.util.List;@Configurationpublic class RestMvcConfigureer implements WebMvcConfigurer { private static final Logger log = LoggerFactory.getLogger(RestMvcConfigureer.class); @Autowired private transient RequestContextProcessor requestContextProcessor; @Override public void addArgumentResolvers(List<HandlerMethodArgumentResolver> resolvers) {//把自己参数解析器添加进去参数解析器的集合 log.info(\"add RequestContextProcessor handlerMethodArgumentResolver\"); resolvers.add(requestContextProcessor); } @Bean public RequestContextProcessor requestContextProcessor(List<HttpMessageConverter<?>> converters) { log.info(\"create RequestContextProcessor bean\"); return new RequestContextProcessor(converters); }} 参考资料 基于HttpMessageConverter消息转换器的参数解析器 配套代码","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"RAML 
API生成器","date":"2021-08-08T04:22:00.000Z","path":"2021/08/08/RAML-API生成器/","text":"RAML API生成器 springmvc-raml-plugin是一个Maven插件,用以从RAML文件生成Spring框架的Controller和POJO代码,结合mybatis-plus就能实现接口设计到数据库。后面就是结合start.spring.io定制化的快速代码生成平台,国内阿里巴巴就定制化了Aliyun Java Initialize ,为所有开发者提供Springboot企业化代码平台。 从RAML文件生成Spring代码实现12345678910111213141516171819202122232425262728293031<plugin> <groupId>com.phoenixnap.oss</groupId> <artifactId>springmvc-raml-plugin</artifactId> <version>2.0.5</version> <configuration> <ramlPath>src/main/api/version.raml</ramlPath> <schemaLocation>src/main/api/schemas</schemaLocation> <outputRelativePath>src/main/java</outputRelativePath> <addTimestampFolder>false</addTimestampFolder> <basePackage>com.example.demo</basePackage> <baseUri>/api</baseUri> <generateUnreferencedObjects>true</generateUnreferencedObjects> <generationConfig> <includeAdditionalProperties>false</includeAdditionalProperties> ... </generationConfig> <seperateMethodsByContentType>false</seperateMethodsByContentType> <rule>com.phoenixnap.oss.ramlplugin.raml2code.rules.Spring4ControllerStubRule</rule> <ruleConfiguration> </ruleConfiguration> </configuration> <executions> <execution> <id>generate-springmvc-endpoints</id> <phase>compile</phase> <goals> <goal>generate-springmvc-endpoints</goal> </goals> </execution> </executions></plugin> RAML12345678910111213141516171819202122232425262728293031323334353637383940#%RAML 1.0title: Hello world # required titlebaseUri: http://localhost:8080/apiversion: 1.0schemas: VersionInline: | { \"type\": \"object\", \"$schema\": \"http://json-schema.org/draft-03/schema\", \"title\": \"Greeting\", \"description\": \"Will greet you\", \"properties\": { \"firstName\": { \"type\": \"string\", \"description\": \"名字\" }, \"lastName\": { \"type\": \"string\", \"description\": \"姓\" }, \"version\": { \"type\": \"string\", \"description\": \"版本号\" } } }/version: # optional resource get: # HTTP method declaration queryParameters: version: type: string example: \"1.0\" 
responses: # declare a response 200: # HTTP status code body: # declare content of response application/json: # media type schema: VersionInline # can also be used with 'type' example: !include example/version-example.json example/version-example.json 12345{ \"firstName\": \"Charles\", \"lastName\": \"Cao\", \"version\": \"1.0\"} 生成的Controller和POJO1234567891011121314151617181920212223242526272829303132package com.example.demo;import com.example.demo.model.VersionInline;import org.springframework.http.ResponseEntity;import org.springframework.validation.annotation.Validated;import org.springframework.web.bind.annotation.RequestMapping;import org.springframework.web.bind.annotation.RequestMethod;import org.springframework.web.bind.annotation.RequestParam;import org.springframework.web.bind.annotation.RestController;/** * No description * (Generated with springmvc-raml-parser v.2.0.5) * */@RestController@RequestMapping(\"/api/version\")@Validatedpublic class VersionController { /** * No description * */ @RequestMapping(value = \"\", method = RequestMethod.GET) public ResponseEntity<VersionInline> getVersionInlineByVersion( @RequestParam String version) { return null; //TODO Autogenerated Method Stub. Implement me please. 
}} 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123package com.example.demo.model;import com.fasterxml.jackson.annotation.JsonInclude;import com.fasterxml.jackson.annotation.JsonProperty;import com.fasterxml.jackson.annotation.JsonPropertyDescription;import com.fasterxml.jackson.annotation.JsonPropertyOrder;import org.apache.commons.lang.builder.EqualsBuilder;import org.apache.commons.lang.builder.HashCodeBuilder;import org.apache.commons.lang.builder.ToStringBuilder;/** * Greeting * <p> * Will greet you * */@JsonInclude(JsonInclude.Include.NON_NULL)@JsonPropertyOrder({ \"firstName\", \"lastName\", \"version\"})public class VersionInline { /** * 名字 * */ @JsonProperty(\"firstName\") @JsonPropertyDescription(\"\\u540d\\u5b57\") private String firstName; /** * 姓 * */ @JsonProperty(\"lastName\") @JsonPropertyDescription(\"\\u59d3\") private String lastName; /** * 版本号 * */ @JsonProperty(\"version\") @JsonPropertyDescription(\"\\u7248\\u672c\\u53f7\") private String version; /** * 名字 * */ @JsonProperty(\"firstName\") public String getFirstName() { return firstName; } /** * 名字 * */ @JsonProperty(\"firstName\") public void setFirstName(String firstName) { this.firstName = firstName; } /** * 姓 * */ @JsonProperty(\"lastName\") public String getLastName() { return lastName; } /** * 姓 * */ @JsonProperty(\"lastName\") public void setLastName(String lastName) { this.lastName = lastName; } /** * 版本号 * */ @JsonProperty(\"version\") public String getVersion() { return version; } /** * 版本号 * */ @JsonProperty(\"version\") public void setVersion(String version) { this.version = version; } @Override public String toString() { return new ToStringBuilder(this).append(\"firstName\", firstName).append(\"lastName\", lastName).append(\"version\", version).toString(); } @Override 
public int hashCode() { return new HashCodeBuilder().append(firstName).append(lastName).append(version).toHashCode(); } @Override public boolean equals(Object other) { if (other == this) { return true; } if ((other instanceof VersionInline) == false) { return false; } VersionInline rhs = ((VersionInline) other); return new EqualsBuilder().append(firstName, rhs.firstName).append(lastName, rhs.lastName).append(version, rhs.version).isEquals(); }} Maven plugin goals: generate-springmvc-endpoints12345678910111213141516171819202122232425262728[INFO] Scanning for projects...[INFO] [INFO] -----------------< com.example:cucumber-rest-assured >------------------[INFO] Building demo 0.0.1-SNAPSHOT[INFO] --------------------------------[ jar ]---------------------------------[INFO] [INFO] --- springmvc-raml-plugin:2.0.5:help (default-cli) @ cucumber-rest-assured ---[INFO] RAML to Spring MVC code generator 2.0.5 Component is Maven plugin that reads RAML documents and creates Spring MVC endpointsThis plugin has 2 goals:springmvc-raml:generate-springmvc-endpoints Maven Plugin MOJO specific to Generation of Spring MVC Endpoints from RAML documents.springmvc-raml:help Display help information on springmvc-raml-plugin. 
Call mvn springmvc-raml:help -Ddetail=true -Dgoal=<goal-name> to display parameter details.[INFO] ------------------------------------------------------------------------[INFO] BUILD SUCCESS[INFO] ------------------------------------------------------------------------[INFO] Total time: 0.843 s[INFO] Finished at: 2021-08-08T12:03:21+08:00[INFO] ------------------------------------------------------------------------ 参考资料 springmvc-raml-plugin 配套代码 Spring MVC - RAML Spec Synchroniser简介","tags":[{"name":"架构","slug":"架构","permalink":"https://caochikai.github.io/tags/%E6%9E%B6%E6%9E%84/"}]},{"title":"API设计语言之Raml","date":"2021-08-07T14:06:00.000Z","path":"2021/08/07/API设计语言之Raml/","text":"API设计语言之RamlRAML RAML的全称是RESTful API建模语言,这是一种基于YAML格式的新规范,在开始编写代码之前以一种全新的方式对API进行建模。接口设计不仅仅是高悬楼阁的概念,通过接口设计文档ATD到文档,再加上RAML完成API建模。有了RAML文件自定义spring.io代码(自行扩展)代码生成SpringBoot工程,真正实现从设计到代码落地!在国外大公司比较出名的商业产品,国内流行是Swagger,Swagger属于OpenAPI标准。 先API建模,通过raml文件提供给代码生成器生成设计好的接口,再加上mybatis-plus或者Spring JPA代码生成器,简直无敌! 
配合定制化java公共库,提供SpringMVC Controller接口URL、方法、请求体和参数校验,在Raml没有新接口代码会校验失败。 代替PostMan做接口测试。 提供Mock服务。 提供API文档,有HTML,markdown格式(raml2html GitHub开源npm插件)。 Example:123456789101112131415#%RAML 1.0title: Hello world # required title/greeting: # optional resource get: # HTTP method declaration responses: # declare a response 200: # HTTP status code body: # declare content of response application/json: # media type # structural definition of a response (schema or type) type: object properties: message: string example: # example how a response looks like message: \"Hello world\" 接口可视化设计 API Designer 是本地设计npm插件,用 JavaScript 编写的 RAML(RESTful API 建模语言)的可视化编辑器。 默认情况下,编辑器使用存储在 HTML5 Localstorage 中的浏览器内文件系统。 本地安装12345678910$ npm install -g api-designer...+ api-designer@0.4.1added 152 packages from 132 contributors in 28.413s$ npm install -g request+ request@2.88.2added 47 packages from 58 contributors in 5.568s$ api-designerAPI designer running on port 3000...(node:6036) [DEP0066] DeprecationWarning: OutgoingMessage.prototype._headers is deprecated 可视化设计插件安装 version.raml(建议安装RAML Plugin For IntelliJ)123456789101112131415161718192021222324252627#%RAML 1.0title: Hello world # required titlebaseUri: http://localhost:8080/apiversion: 1.0/version: # optional resource get: # HTTP method declaration queryParameters: version: type: string example: \"1.0\" responses: # declare a response 200: # HTTP status code body: # declare content of response application/json: # media type # structural definition of a response (schema or type) type: object properties: firstName: string lastName: string version: string example: | { \"firstName\": \"Charles\", \"lastName\": \"Cao\", \"version\": \"1.0\" } 测试SpringMVC接口 1234567891011121314@CrossOrigin@RestController@RequestMapping(\"/api\")public class TestController { @GetMapping(\"/version\") ResponseEntity test(@RequestParam String version) { Greeting greeting = new Greeting(); greeting.setFirstName(\"Charles\"); greeting.setLastName(\"Cao\"); 
greeting.setVersion(version); return ResponseEntity.ok().body(greeting); }} 1234567891011121314151617###IDEA http editoer requestGET http://localhost:8080/api/version?version=1.0HTTP/1.1 200 Content-Type: application/jsonTransfer-Encoding: chunkedDate: Sat, 07 Aug 2021 13:14:57 GMTKeep-Alive: timeout=60Connection: keep-alive{ \"firstName\": \"Charles\", \"lastName\": \"Cao\", \"version\": \"1.0\"}Response code: 200; Time: 174ms; Content length: 56 bytes 生成mock服务12345678$ npm install -g osprey-mock-serviceC:\\Users\\Administrator\\AppData\\Roaming\\npm\\osprey-mock-service -> C:\\Users\\Administrator\\AppData\\Roaming\\npm\\node_modules\\osprey-mock-service\\bin\\osprey-mock-service.js+ osprey-mock-service@1.0.0added 136 packages from 55 contributors in 33.128s###需要window管理员权限# osprey-mock-service -f version.raml -p 8000Mock service running at http://localhost:8000::ffff:127.0.0.1 - - [07/Aug/2021:13:36:33 +0000] \"GET /api/version?version=1.0 HTTP/1.1\" 200 - \"-\" \"Apache-HttpClient/4.5.13 (Java/11.0.11)\" 123456789101112131415GET http://localhost:8000/api/version?version=1.0HTTP/1.1 200 OKContent-Type: application/jsonDate: Sat, 07 Aug 2021 13:36:33 GMTConnection: keep-aliveTransfer-Encoding: chunked{ \"firstName\": \"Charles\", \"lastName\": \"Cao\", \"version\": \"1.0\"}Response code: 200 (OK); Time: 228ms; Content length: 73 bytes 接口测试 测试用的工具有多重选择,选择了相对比较流行的Abao工具,只支持RAML 0.8以下版本。 1234# npm install -g abao...+ abao@0.5.0added 194 packages from 450 contributors in 26.061s 参考资料 Raml官方文档 Raml各大工具 Raml本地设计工具 Raml例子 Raml入门","tags":[{"name":"架构","slug":"架构","permalink":"https://caochikai.github.io/tags/%E6%9E%B6%E6%9E%84/"}]},{"title":"API设计文档案例","date":"2021-08-07T06:47:00.000Z","path":"2021/08/07/API设计文档案例/","text":"API设计文档案例一、接口设计 在接口规范指导下,根据需求编写接口设计文档ATD(API Design Document),描述功能形成的聚合服务API,每个聚合服务PAPI(Process API)需要多少个基础服务SAPI(System API);每个聚合服务需要编写接口技术文档,概要描述下用户接口应用场景,详细到请求方法和URL,数据字段映射关系和错误代码设计等; 二、接口设计文档ATD 
好的接口有高可阅读性,版本发布计划,全面的文档,安全性设计和所需资源性能计算,网络拓扑图,标准错误和日志处理,弹性可扩容等等特性。 ddd领域驱动设计 通过组件图描述形成逻辑建模,区分聚合服务和基础服务关系,以及是否缺少基础服务组件支持。 DDD 网络拓扑图 拓扑 标准的公共错误处理 HTTP Code Error Code Description 400 BERR_00001 请求参数校验失败 404 BERR_00002 基础功能不可用 三、接口技术文档ATD流程图 每个聚合服务PAPI(Process API)需要多少个基础服务SAPI(System API) image 字段映射 如果是聚合服务Process API,提供聚合请求数据和基础服务之间字段映射;如果是基础服务System API,提供基础服务和表字段,MQ数据源字段之间映射关系; 请求Request URL PAPI参数 SAPI参数 备注 POST /favorites functionId funcId 菜单ID 请求体Request JSON example和schema1[\"USER_ADD\",\"USER_DEL\"] 123456789101112131415161718{ \"type\": \"array\", \"$schema\": \"http://json-schema.org/draft-03/schema\", \"title\": \"Greeting\", \"description\": \"update menu list\", \"properties\": { \"items\": { \"type\": \"array\", \"description\": \"a list of menu id\", \"properties\": { \"key\": { \"type\": \"string\", \"description\": \" menu id\" } } } }} 响应Response PAPI字段名 SAPI字段名 是否为空 备注 firstName customerFirstName no 名字 lastName customerLastName no 姓 menu menu no 菜单名称 响应体Request JSON example和schema12345{ \"firstName\": \"Charles\", \"lastName\": \"Cao\", \"menu\": \"USER_ADD,USER_DEL\"} 1234567891011121314151617181920{ \"type\": \"object\", \"$schema\": \"http://json-schema.org/draft-03/schema\", \"title\": \"Greeting\", \"description\": \"Will greet you\", \"properties\": { \"firstName\": { \"type\": \"string\", \"description\": \"名字\" }, \"lastName\": { \"type\": \"string\", \"description\": \"姓\" }, \"menu\": { \"type\": \"string\", \"description\": \"菜单\" } }} 错误匹配 HTTP Code PAPI Error Code SAPI Error Code Description 401 PERR_00003 SERR_00003 请求参数校验失败 400 PERR_00004 functionId不存在 关键技术决策 发起人 描述 状态 XXX 菜单信息需要关系型数据库Mysql存储 完成 XXX 通过MQ向XX队列发起消息通知 代完成 四、参考资料ProcessOn模板社区-AWS","tags":[{"name":"架构","slug":"架构","permalink":"https://caochikai.github.io/tags/%E6%9E%B6%E6%9E%84/"}]},{"title":"接口测试 Rest-assured","date":"2021-08-02T14:31:00.000Z","path":"2021/08/02/接口测试-Rest-assured/","text":"接口测试 Rest-assured一、如何确保 API 的稳定性与正确性呢? 
全面系统的测试是必不可少的。Java 开发者常常借助于 JUnit或者TestNg做单元测试!但是从REST API来讲,开发者知道正在测试的是哪个类、哪个方法,测试的是哪个 REST API,所以就需要由一套 Java 实现的 REST API 测试框架。虽然它不是从用户的角度出发,但是结合BDD行为驱动测试,就能保证接口的稳定性和功能的正确性!当然本章还是以rest-assured为主。 什么是Rest-assured? Rest-Assured 是一套由 Java 实现的 REST API 测试框架,它是一个现代化轻量级的 REST API 客户端,可以简化HTTP Builder向服务器端发起 HTTP 请求,并验证和校对返回结果;它的语法非常简洁,是一种专为测试 REST API 而设计的 DSL(针对某一领域,具有受限表达性的一种计算机程序设计语言)。这套写测试代码方案适合复杂且不成熟的接口,另外还有一套以JSON测试方案适合成熟简洁的系统,后面再写。 二、Rest-assured好特性 现代化简洁的语法,针对JSON、XML和身份认证有非常多的语法糖(疯狂链式调用,非常Groovy)。 JSON Schema Validation:特别是校验返回的JSON结构体,初期设计往往会落后于实际,最后需要做回归测试,同步修改初期的设计。 Spring Mock Mvc和Spring Web Test Client有集成模块,生态成熟。 三、集成Cucumber Report Maven Dependencies123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117<?xml version=\"1.0\" encoding=\"UTF-8\"?><project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>2.4.5</version> <relativePath/> <!-- lookup parent from repository --> </parent> <groupId>com.example</groupId> <artifactId>cucumber-rest-assured</artifactId> <version>0.0.1-SNAPSHOT</version> <name>demo</name> <description>Demo project for BDD Test</description> <properties> <java.version>1.8</java.version> </properties> <dependencies> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-web</artifactId> </dependency> <dependency> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> <optional>true</optional> </dependency> <dependency> <groupId>org.springframework.boot</groupId> 
<artifactId>spring-boot-starter-test</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>io.rest-assured</groupId> <artifactId>rest-assured</artifactId> <version>4.2.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.rest-assured</groupId> <artifactId>json-path</artifactId> <version>4.2.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.rest-assured</groupId> <artifactId>xml-path</artifactId> <version>4.2.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.rest-assured</groupId> <artifactId>json-schema-validator</artifactId> <version>4.2.0</version> </dependency> <dependency> <groupId>io.rest-assured</groupId> <artifactId>spring-mock-mvc</artifactId> <version>4.2.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-java</artifactId> <version>6.8.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-spring</artifactId> <version>6.8.0</version> <scope>test</scope> </dependency> <dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-junit</artifactId> <version>6.8.0</version> <scope>test</scope> </dependency> <dependency> <groupId>org.junit.vintage</groupId> <artifactId>junit-vintage-engine</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>info.cukes</groupId> <artifactId>cucumber-core</artifactId> <version>1.2.4</version> <scope>test</scope> </dependency> </dependencies> <build> <plugins> <plugin> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-maven-plugin</artifactId> <configuration> <excludes> <exclude> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> </exclude> </excludes> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.22.2</version> </plugin> </plugins> </build></project> 完整例子1234567891011121314151617181920{ \"type\": 
\"object\", \"$schema\": \"http://json-schema.org/draft-03/schema\", \"title\": \"Greeting\", \"description\": \"Will greet you\", \"properties\": { \"firstName\": { \"type\": \"string\", \"description\": \"\" }, \"lastName\": { \"type\": \"string\", \"description\": \"\" }, \"version\": { \"type\": \"string\", \"description\": \"\" } }} 1234567891011121314151617181920212223package com.example.demo;import io.cucumber.junit.Cucumber;import io.cucumber.junit.CucumberOptions;import io.cucumber.spring.CucumberContextConfiguration;import org.junit.runner.RunWith;import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;import org.springframework.boot.test.context.SpringBootTest;/** * Work around. Surefire does not use JUnits Test Engine discovery * functionality. Alternatively execute the the * org.junit.platform.console.ConsoleLauncher with the maven-antrun-plugin. */@RunWith(Cucumber.class)@CucumberOptions( plugin = {\"html:target/results.html\", \"message:target/results.ndjson\"})@CucumberContextConfiguration@AutoConfigureMockMvc@SpringBootTest(classes = DemoApplication.class, webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT)public class RunCucumberTest {} 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859package com.example.demo;import io.cucumber.java.Before;import io.cucumber.java.en.And;import io.cucumber.java.en.Given;import io.cucumber.java.en.Then;import io.cucumber.java.en.When;import io.restassured.RestAssured;import io.restassured.response.Response;import org.hamcrest.Matchers;import org.springframework.beans.factory.annotation.Value;import java.util.HashMap;import java.util.Map;import static io.restassured.RestAssured.given;import static io.restassured.module.jsv.JsonSchemaValidator.matchesJsonSchemaInClasspath;public class GetStep { @Value(\"${local.server.port}\") private int port; private final Map<String, Object> parameters = new HashMap<>(); private 
Response response; @Given(\"the client provide version {string}\") public void theClientProvideVersion(String version) { parameters.put(\"version\", version); } @When(\"the client calls {string}\") public void theClientCalls(String url) { response = given(). when(). params(parameters). get(url); } @Then(\"^the client receives status code of (\\\\d+)$\") public void the_client_receives_status_code_of(int statusCode) throws Throwable { response.then().statusCode(statusCode); } @And(\"^the client receives server version (.+)$\") public void the_client_receives_server_version_body(String version) throws Throwable { response.then() .body(matchesJsonSchemaInClasspath(\"greeting-schema.json\")) .body(\"version\", Matchers.equalTo(version)); } @Before public void before() { RestAssured.port = port; RestAssured.basePath = \"/api\"; }} 五、参考资料 官方文档 WIKI 配套代码","tags":[{"name":"Test","slug":"Test","permalink":"https://caochikai.github.io/tags/Test/"}]},{"title":"API Standard","date":"2021-08-01T07:30:00.000Z","path":"2021/08/01/API-Standard/","text":"API Standard一、意义 API standard即接口标准,它提供了工程开发指南和最好的培训实现,能让API接口开发者自愿跟随共同的工程标准,技术解决方案,公共库管理,甚至是构建API的模板。 二、各项标准 项目标准涉及到方方面面,随着业务不断发展,持续演变进化,架构团队需要与各团队深度合作,成立架构评审委员会ARB,在不断配合业务部门支撑公司业务发展,勇于承担重任。 分类 标准 文档名称 目的 培训 开发者工具和权限申请 培训文档 使用免费的软件和工具是主要方向,应根据最小权限原则在必要时候申请。 培训 快速开始指引 新人指引离职指引技术经理指引数据库工程师指引 基于不同的团队角色提供开始文档,比如leader和developer 开发流程 代码标准 代码标准 在API开发中必须遵循代码标准 开发流程 分支策略 分支管理策略 提供分支、提交信息、拉请求和代码合并的命名规定。主分支是保护分支。理想情况下,开发完成才有主分支。feature功能分支合并后必须删除。 开发流程 CI/CD Pipeline CI/CD Pipeline 在上生产之前必须审查扫描报告 接口设计 HTTP REST,API契约和JSON标准 接口设计 遵循OpenAPI 或者RAML规范。JSON schema和example由开发者必须提供。 应用基础 Spring Boot框架 应用基础 选型Spring Boot作为后端API开发基础框架。提供父POM和公共包推动API接口开发。 应用基础 依赖管理 源码依赖管理 提供parent-dependencies BOM统一依赖管理。提供两套父POM针对WebMvc和WebFlux标准给API开发。开发者可以根据需求选择其中一个父POM。 应用基础 仓库、包、类、文件夹结构的命名规范和代码规范 源码管理 开发者应当遵守包命名规范。为SpringBoot API提供了预定义的文件夹和文件结构。提供了编码规范,建议开发人员应遵循。 应用基础 公共库 公共库 提供公共库来推动API开发。loggingerror-handingraml-paserhttp-client 应用基础 模板和参考实现 模板和参考实现 
提供简单和模板。提供代码生成器。 编码流程 业务流程和编排 业务流程和编排 对基础服务选择同步或者异步调用。对基础服务选择同步或者顺序调用。复杂编排规范分析。 编码流程 非阻塞IO和响应式模式 非阻塞IO和响应式模式 应当使用Reactor Reactive开发。 编码流程 数据校验 数据校验 输入数据必须要在业务处理之前校验。API规范中必须定义数据校验。 编码流程 数据转化和匹配 数据转化和匹配 MapStruct选型为mapping framework。Jackson选型为默认json和POJO 转化框架。Lombok禁止使用。 编码流程 限流熔断 限流熔断 在应用推荐使用resislence4j库。 编码流程 异常处理 异常处理 Error Schema必须遵守。自定义HTTP状态码和Error code不要冲突公共规范。提供公共全局异常处理库处理应用跑出的error。 协议支持 Http Server Http Server 强制默认使用tomcat和8080端口或netty。 协议支持 HTTP Client HTTP Client Reactor Netty Client是核心http client库。公共库提供默认配置和扩展配置。如超时、代理等。 协议支持 数据加密和安全 数据加密和安全 提供公共库异步记录请求和响应到日志,保护敏感数据进行加密打印。 安全 鉴权 鉴权 强制开启Token校验。允许健康检查相关端点跳过。 安全 授权 授权 针对用户角色校验当前请求时候是否有权限。 安全 环境变量加密 环境变量加密 阿里云密钥管理服务(KMS)或者AWS Secret Mananger来存储环境变量。 安全 密钥管理 密钥管理 阿里云密钥管理服务(KMS)或者AWS Secret Mananger来管理秘钥,通过脚本定期更新服务密码。如数据库和redis。 安全 SSL强制校验 SSL强制校验 所有连接必须使用SSL加密。应用中通过Https FItler强制开启。 安全 参数加密解密 参数加密解密 公共库secure-data提供运行时重要参数加密解密。 安全 Client ID和Secret秘钥对 Client ID和Secret秘钥对 针对部分的外部第三应用授权和接口限流。 监控和故障排查 日志 日志 关键信息必须打印。提供公共包logging。 监控和故障排查 日志框架 日志框架 SF4J and logback选型为默认日志库。 监控和故障排查 日志规范 日志规范 INFO级别为默认配置。长时间处理任务必须打印日志。日志打印点和数据必须review。 监控和故障排查 日志配置和输出 日志配置和输出 提供公共日志配置和公共抽象接口,异常时将打印erro并返回规定error schema。 监控和故障排查 日志追踪 日志追踪 日志必须记录异常出现信息和Trace ID,不能吞掉异常信息。 监控和故障排查 Info和Health端点 Info和Health端点 提供公共库endpoint。info端点必须提供当前应用基础信息。health端点必须保护应用当前状态。 监控和故障排查 Splunk集成 Splunk集成 通过CICD pipeline配置namespace lable完成集成。在控制台必须符合监控规范。 监控和故障排查 AppDynamic集成 AppDynamic集成 在容器里配置JAVAOPS AppDynamic参数和agent集成。 支持 发布配置 发布配置 推荐发布配置不要参合到应用代码。通过属性外部化配置,如容器环境变量,通过不修改代码和重新部署情况下方便重新调整配置。 支持 多区域发布支持 多区域发布支持 同一套代码支持不同数据中心。 测试 单元测试和代码覆盖率 单元测试和代码覆盖率 使用Junit 5 单元测试框架。提供公共库完成API功能测试。行和分支测试覆盖率不能低于100%。 测试 集成测试 集成测试 做测试渗透率和压力测试。提供jemeter压测脚本和框架,通过Prometheus控制面板集成Jenkins报告和AppDynamic jvm性能指标。 审计 Checkmax, Nexus IQ,SonarQube和Jacoco 代码质量测量 Checkmax能检查应用代码漏洞,要求必须为零。Nexus IQ扫描安全和license法务风险,超低级必须低于5个,中高级风险必须为零,否则提供审计报告。SonarQube和jacoco测量代码和测试质量,漏洞和错误必须为零,测试覆盖必须100%。 审计 README.md 项目文档规范 READMED.md必须强制记录关键信息。 其他 架构演变历史 架构演变历史 记录架构随年度变化演变历史背景。 其他 支持插件 支持插件 
提供插件支持根据预定义规则扫描Maven构建过程中的源码,上传到分析服务,生成对应报告,验证当前代码是否符合整套架构规范。","tags":[{"name":"架构","slug":"架构","permalink":"https://caochikai.github.io/tags/%E6%9E%B6%E6%9E%84/"}]},{"title":"行为驱动测试BDD Cucumber方案","date":"2021-07-31T16:19:00.000Z","path":"2021/08/01/行为驱动测试BDD-Cucumber方案/","text":"行为驱动测试BDD Cucumber方案一、什么是BDD Behaviour-Driven Development既行为驱动开发,是构建Cucumber以支持的软件开发过程。 BDD是软件团队的一种工作方式,通过以下方式缩小业务人员和技术人员之间的差距: 鼓励跨角色的协作,以建立对要解决的问题的共享理解。 业务员根据用户的行为生成需求场景描述文档,比如行为条件和参数,相当于伪代码。 技术人员通过用户的行为场景设计测试用例,验收和驱动开发,并生成符合伪代码报告。 敏捷流程 二、Cucumber特性 Cucumber是一种可以使用文本描述语言来执行自动测试用例的工具,使用的语言叫做Gherkin。Gherkin用于描述软件的行为而不需要了解具体的实现,主要有两个目的文档和自动测试用例(最好和手工测试用例统一)。 Gherkin支持超过40种语言,包括英文、中文。 Gherkin可以在任何地方新增注释,注释已#开头,都是以.feature结尾,在feature文件中输入功能描述、场景、步骤,当执行这个功能时每一个步骤都需要编写java代码块来实现具体的功能。当前cucumber支持多种语言,还可以使用java和spring、javascript和react。下面是Cucumber Ruby 官方截图: Ruby官方例子 Cucumber Report截图 Gherkin语言基本概念:Features功能 一个feature文件对应一组集合功能,比如用户管理。 Scenario场景 一个feature文件对应一组Scenario场景,比如用户管理功能有新增用户,删除用户,修改用户和查询用户四个场景。Scenario Outline场景大纲必须包含一个示例Examples,Examples有多少行测试驱动数据,测试场景Scenario就执行多少次,以下面步骤第一步Given step的参数变量,就来源于Examples下的day。 Step denfinitions步骤 Scenario场景下有多个Step步骤,每一Step步骤都以Given, When, Then, And, or But开始。 Given When then(假如 当 那么) 每一步都以Given, When, Then, And, or But开始。java实现都有对应注解。 Given——@Given:用例开始执行前的一个前置条件,比如用户已经登录对应代码有jwt token。 When——@When:用例开始执行的一些关键操作步骤,类似修改用户性别等。 Then——@Then:观察结果,就是平时用例中的验证步骤,比如修改后验证数据库是否存入操作成功。 通过下列步骤介绍Cucumber流程: 第一步:通过Gherkin伪代码文本语言描述,写到feature文件。以官方10分钟体验为例: 12345678910111213Feature: Is it Friday yet? Everybody wants to know when it's Friday Scenario Outline: Today is or is not Friday Given today is \"<day>\" When I ask whether it's Friday yet Then I should be told \"<answer>\" Examples: | day | answer | | Friday | TGIF | | Sunday | Nope | | anything else! 
| Nope | 使用java进行步骤定义。feature文件的step对应了每个java方法上的注解,而且注解双引号文本内容在所有feature中唯一且不可重复。 1234567891011121314151617181920212223242526272829303132package hellocucumber;import io.cucumber.java.en.Given;import io.cucumber.java.en.When;import io.cucumber.java.en.Then;import static org.junit.Assert.*;class IsItFriday { static String isItFriday(String today) { return \"Friday\".equals(today) ? \"TGIF\" : \"Nope\"; }}public class Stepdefs { private String today; private String actualAnswer; @Given(\"today is {string}\") public void today_is(String today) { this.today = today; } @When(\"I ask whether it's Friday yet\") public void i_ask_whether_it_s_Friday_yet() { actualAnswer = IsItFriday.isItFriday(today); } @Then(\"I should be told {string}\") public void i_should_be_told(String expectedAnswer) { assertEquals(expectedAnswer, actualAnswer); }} Maven mvn test控制台输出: 1234567891011121314151617181920212223242526272829303132------------------------------------------------------- T E S T S-------------------------------------------------------Running hellocucumber.RunCucumberTestFeature: Is it Friday yet? 
Everybody wants to know when it's Friday Scenario Outline: Today is or is not Friday # hellocucumber/is_it_friday_yet.feature:4 Given today is "<day>" When I ask whether it's Friday yet Then I should be told "<answer>" Examples: Scenario Outline: Today is or is not Friday # hellocucumber/is_it_friday_yet.feature:11 Given today is "Friday" # Stepdefs.today_is(String) When I ask whether it's Friday yet # Stepdefs.i_ask_whether_it_s_Friday_yet() Then I should be told "TGIF" # Stepdefs.i_should_be_told(String) Scenario Outline: Today is or is not Friday # hellocucumber/is_it_friday_yet.feature:12 Given today is "Sunday" # Stepdefs.today_is(String) When I ask whether it's Friday yet # Stepdefs.i_ask_whether_it_s_Friday_yet() Then I should be told "Nope" # Stepdefs.i_should_be_told(String) Scenario Outline: Today is or is not Friday # hellocucumber/is_it_friday_yet.feature:13 Given today is "anything else!" # Stepdefs.today_is(String) When I ask whether it's Friday yet # Stepdefs.i_ask_whether_it_s_Friday_yet() Then I should be told "Nope" # Stepdefs.i_should_be_told(String)3 Scenarios (3 passed)9 Steps (9 passed)0m0.255s 三、集成Spring生成测试报告Maven Dependencies123456789101112131415161718<dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-java</artifactId> <version>6.8.0</version> <scope>test</scope></dependency><dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-junit</artifactId> <version>6.8.0</version> <scope>test</scope></dependency><dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-spring</artifactId> <version>6.8.0</version> <scope>test</scope></dependency> REST Controller1234567@RestControllerpublic class VersionController { @GetMapping(\"/version\") public String getVersion() { return \"1.0\"; }} Cucumber Step Definitions12345//单独跑Cucumber@RunWith(Cucumber.class)@CucumberOptions(features = \"src/test/resources\")public class CucumberIntegrationTest {} 12345Feature: the version can be retrieved Scenario: client makes call to GET 
/version When the client calls /version Then the client receives status code of 200 And the client receives server version 1.0 12345678910111213141516@When(\"^the client calls /version$\")public void the_client_issues_GET_version() throws Throwable{ executeGet(\"http://localhost:8080/version\");}@Then(\"^the client receives status code of (\\\\d+)$\")public void the_client_receives_status_code_of(int statusCode) throws Throwable { HttpStatus currentStatusCode = latestResponse.getTheResponse().getStatusCode(); assertThat(\"status code is incorrect : \"+ latestResponse.getBody(), currentStatusCode.value(), is(statusCode));}@And(\"^the client receives server version (.+)$\")public void the_client_receives_server_version_body(String version) throws Throwable { assertThat(latestResponse.getBody(), is(version));} 1234567891011121314//集成Springboot@CucumberContextConfiguration@SpringBootTestpublic class SpringIntegrationTest { // executeGet implementation}//进行接口自动化测试public class StepDefs extends SpringIntegrationTest { @When(\"^the client calls /version$\") public void the_client_issues_GET_version() throws Throwable { executeGet(\"http://localhost:8080/version\"); }} 四、Moco Server在集成测试和开发中应用 moco工具是在github开源的一个项目,可以搭一个简单的mock server方便我们进行开发调试,以及在单元测试中取消边界服务。有两种形式,第一种是集成到项目,随项目启动提供http mock服务。第二种是一个jar包,配合配置文件和命令启动http服务。 通过jar提供mock http服务:首先我们要编写一个config文件,把我们需要“模拟”的请求和响应写入这个配置文件,配置文件是json格式的访问 localhost:12306/hello 接口,返回一个纯文本“moco”。 123456789101112[ { \"request\": { \"uri\":\"/hello\" }, \"response\": { \"text\":\"moco\" } }] 启动指令: 1java -jar moco-runner-0.11.1-standalone.jar http -p 12306 -c config.json 集成到项目Maven Dependencies12345<dependency> <groupId>com.github.dreamhead</groupId> <artifactId>moco-core</artifactId> <version>1.2.0</version></dependency> API Test123456789101112131415161718192021222324252627282930313233import org.junit.After;import org.junit.Before;import org.junit.Test;import java.io.IOException;import static 
com.github.dreamhead.moco.Moco.httpServer;import static com.github.dreamhead.moco.Runner.runner;import static org.hamcrest.CoreMatchers.is;import static org.hamcrest.MatcherAssert.assertThat;public class MocoRunnerTest { private Runner runner; @Before public void setup() { HttpServer server = httpServer(12306); server.response(\"foo\"); runner = runner(server); runner.start(); } @After public void tearDown() { runner.stop(); } @Test public void should_response_as_expected() throws IOException { Content content = Request.Get(\"http://localhost:12306\").execute().returnContent(); assertThat(content.asString(), is(\"foo\")); }} 五、参考资料 Cucumber官方文档 Cucumber测试报告例子 Cucumber入门2 - 啥是BDD? Moco Server","tags":[{"name":"Test","slug":"Test","permalink":"https://caochikai.github.io/tags/Test/"}]},{"title":"Netty Connection prematurely closed BEFORE response和connection reset by peer解决方案","date":"2021-07-31T03:50:00.000Z","path":"2021/07/31/Netty-Connection-prematurely-closed-BEFORE-response和connection-reset-by-peer解决方案/","text":"Netty Connection prematurely closed BEFORE response和connection reset by peer解决方案一、异常说明 在长达一年时间里面,由于云基础设施kubernetes经常升级更新,以及网络原因不定期出现。长期和云设施部门打交道,所有k8s集群都是Istio进行流量控制,Istio提供服务网格服务。如果要请求到容器外部网络,需要通过Egress Gateway的组件,从HTTPS代理Squid proxy请求外部网络。一个Squid proxy集群对应一个k8s,一个Squid集群十二个节点,部分节点出现故障就会造成整个集群出现概率性netty请求异常。作为踩雷先锋队和救火员,所以很多其他部门或者团队都有向我咨询反馈,首先从日志平台Splunk搜索到异常,进入容器通过curl -kv 测试外部请求,出现概率性连接异常,连接被重置了。概率问题通常会想到jemeter做接口压测,通过jemeter的聚合报告,大概有23%概率出现error,写报告申请云设施部门反馈,最终发现某个Squid节点故障,删除该节点再次压测,一切恢复正常。后面一段时间,遇到部署成功,第一次请求一定发生该上次问题,反馈之后短时间没解决,我们通过netty reactor retry策略,一旦发生该异常触发重试请求,从代码上解决问题。 1CURL ERROR: Recv failure: Connection reset by peer Splunk收集到的异常日志: Splunk是收集实时IT设施的数据日志平台,在国外非常流行,类似商业化的ELK。架构上通过splunk-connect-for-kubernetes开源项目,负责收集几个K8s集群的容器日志,每个集群大概200到300多个namespace,可以说性能非常给力。 Connection prematurely closed BEFORE response 12345reactor.core.ReactiveException: reactor.netty.http.client.PrematureCloseException: Connection prematurely closed BEFORE 
response at reactor.core.Exceptions.propagate(Exceptions.java:393) at reactor.core.publisher.BlockingSingleSubscriber.blockingGet(BlockingSingleSubscriber.java:97) at reactor.core.publisher.Mono.block(Mono.java:1678)·······业务相关,省略 connection reset by peer 12345678910111213141516java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.read0(FileDispatcherImpl.java) at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39) at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223) at sun.nio.ch.IOUtil.read(IOUtil.java:192) at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:380) at io.netty.buffer.PooledUnsafeDirectByteBuf.setBytes(PooledUnsafeDirectByteBuf.java:288) at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1108) at io.netty.channel.socket.nio.NioSocketChannel.doReadBytes(NioSocketChannel.java:345) at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:131) at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:645) at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:580) at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:497) at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:459) at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:886) at java.lang.Thread.run(Thread.java:748) 二、Retry策略解决第一次必定出现Connection reset by peer 在实际的开发中,可以请求重试的场景应该是:网络异常、请求超时异常,需根据实际应用场景的设计重试策略,防止重试带来接口幂等性问题。 123456789101112Retry<?> retry = Retry.onlyIf(x -> x.exception() instanceof IOException) .retryMax(3) // 重试3次 .backoff(Backoff.exponential(Duration.ofSeconds(5),Duration.ofSeconds(60),2,true));Mono<String> mono = webClient .get() //GET 请求 .uri(\"/posts/1\") // 请求路径,这里的请求路径是正确的 .retrieve() .bodyToMono(String.class) .retryWhen(retry); //满足Retry条件进行重试System.out.println(\"=====\" + mono.block()); 三、网络分析 
网络异常原因从本质上网络连接,牵扯到TCP三次握手建立连接,又导致Socket套接字在读取数据时,服务器端因为某种原因提前关闭了Connection,而客户端依然用握手成功的旧连接在读写数据,此时服务器会返回复位标志“RST”,然后客户端就会提示“java.net.SocketException: Connection reset”。 基础概念套接字Socket 以192.168.1.11:80为例子,TCP把连接作为最基本的对象,每一条TCP连接都有两个端点,这种端点我们叫作套接字(socket),它的定义为端口号拼接到IP地址即构成了套接字。 半连接队列和全连接队列 在 TCP 三次握手的时候,Linux 内核会维护两个队列,分别是:半连接队列,也称 SYN 队列;全连接队列,也称 accepet 队列; 服务端收到客户端发起的 SYN 请求后,内核会把该连接存储到半连接队列,并向客户端响应 SYN+ACK,接着客户端会返回 ACK,服务端收到第三次握手的 ACK 后,内核会把连接从半连接队列移除,然后创建新的完全的连接,并将其添加到 accept 队列,等待进程调用 accept 函数时把连接取出来。 滑动窗口 控制报文流量,用来告诉对方目前接收端缓冲器大小。当为0时标识缓冲器已满,需要停止发包,单位为byte。 SYN 同步序列编号(Synchronize Sequence Numbers),在客户机和服务器之间建立正常的TCP网络连接时,客户机首先发出一个SYN消息。 ACK Acknowledge character即是确认字符,在数据通信中,接收站发给发送站的一种传输类控制字符。 RST Reset the connection表示复位,关闭因某种原因引起出现的错误连接,也用来拒绝非法数据和请求。 FIN No more data from sender,用来释放连接,表明发送方已经没有数据发送了。 URG Urgent Pointer field significant紧急指针。用到的时候值为1,用来处理避免TCP数据流中断。 PSH Push Function——PUSH标志的数据,置1时请求的数据段在接收方得到后就可直接送到应用程序,而不必等到缓冲区满时才传送。 wireshark网络抓包TCP三次握手到RST复位 upload successful TCP协议:三次握手、四次挥手 upload successful 三次握手建立连接 upload successful 四次握手关闭连接 upload successful 四、参考资料 WebClient第6篇-请求失败自动重试机制 github netty: java.io.IOException: Connection reset by peer TCP中的RST标志(Reset)详解 两张动图-彻底明白TCP的三次握手与四次挥手 TCP协议中的6个重要标志位 TCP 半连接队列和全连接队列满了会发生什么?又该如何应对?","tags":[{"name":"Linux","slug":"Linux","permalink":"https://caochikai.github.io/tags/Linux/"},{"name":"TCP","slug":"TCP","permalink":"https://caochikai.github.io/tags/TCP/"}]},{"title":"redis如何实现消息队列","date":"2020-06-06T14:55:00.000Z","path":"2020/06/06/redis如何实现消息队列/","text":"redis如何实现消息队列一、复盘面试 遇到面试问到:如果我想在redis实现队列,会用到哪个命令?根据redis手册关于Redis 列表(List) 命令主要操作有四种非阻塞版lpush/lpop/rpush/rpop,两种阻塞版本blpop/brpop,l和r代表左(left)和右(right)缩写,push代表添加也叫压入,pop代表弹出。基本满足先进入先出(FIFO)效果就完成了基本的队列,本次GitHub实验代码仓库。 通过命令模拟队列12345678910111213141516RDM Redis Console连接中...已连接。localhost:0>lpush mylist a b c d\"4\"localhost:0>rpop mylist\"a\"localhost:0>rpop mylist\"b\"localhost:0>rpop mylist\"c\"localhost:0>rpop mylist\"d\"localhost:0>rpop 
mylistnulllocalhost:0> 二、java模拟队列实现生产者消费者模式redis配置常量1234567891011121314151617181920package io.charles;public class Constant { /** * redis链接地址 */ public static final String host = \"127.0.0.1\"; /** * redis启动端口 */ public static final int port = 6379; /** * 正式队列列表名称 */ public static final String task_queue = \"task-queue\"; /** * 临时队列列表名称 */ public static final String tmp_queue = \"tmp-queue\";} TaskProducer模拟生产者123456789101112131415161718192021222324252627282930package io.charles;/** * 用于模拟生产者 */import java.util.Random;import java.util.UUID;import redis.clients.jedis.Jedis;public class TaskProducer implements Runnable { Jedis jedis = new Jedis(Constant.host, Constant.port); public void run() { Random random = new Random(); while (true) { try { Thread.sleep(random.nextInt(600) + 600); // 模拟生成一个任务 UUID taskid = UUID.randomUUID(); //将任务插入任务队列:task-queue jedis.lpush(Constant.task_queue, taskid.toString()); System.out.println(\"插入了一个新的任务: \" + taskid); } catch (Exception e) { e.printStackTrace(); } } }} TaskConsumer模拟消费者1234567891011121314151617181920212223242526272829303132333435363738394041package io.charles;import java.util.Random;import redis.clients.jedis.Jedis;/** * 模拟消费者 */public class TaskConsumer implements Runnable { Jedis jedis = new Jedis(Constant.host, Constant.port); public void run() { Random random = new Random(); while (true) { //从任务队列\"task-queue\"中获取一个任务,并将该任务放入临时队列\"tmp-queue\" String taskid = jedis.rpoplpush(Constant.task_queue, Constant.tmp_queue); // 模拟一下:睡觉 try { Thread.sleep(1000); } catch (InterruptedException e) { e.printStackTrace(); } //模拟成功和失败的偶然现象 if (random.nextInt(13) % 7 == 0) {// 模拟失败的情况,概率为2/13 //将本次处理失败的任务从临时队列\"tmp-queue\"中,弹回任务队列\"task-queue\" jedis.rpoplpush(Constant.task_queue, Constant.tmp_queue); System.out.println(taskid + \"处理失败,被弹回任务队列\"); } else {// 模拟成功的情况 // 将本次任务从临时队列\"tmp-queue\"中清除 jedis.rpop(Constant.tmp_queue); System.out.println(taskid + \"处理成功,被清除\"); } } }} TaskShedulerSystem启动生产者和消费者线程1234567891011121314package 
io.charles;public class TaskShedulerSystem { public static void main(String[] args) throws Exception { // 启动一个生产者线程,模拟任务的产生 new Thread(new TaskProducer()).start(); Thread.sleep(15000); //启动一个线程者线程,模拟任务的处理 new Thread(new TaskConsumer()).start(); }} 三、参考资料 redis中文命令手册 GitHub实验代码仓库","tags":[{"name":"redis","slug":"redis","permalink":"https://caochikai.github.io/tags/redis/"}]},{"title":"基于Seata实现分布式事务","date":"2020-05-31T03:24:00.000Z","path":"2020/05/31/基于Seata实现分布式事务/","text":"一、Seata简介 2019 年 1 月,阿里巴巴中间件团队发起了开源项目 Fescar(Fast & EaSy Commit And Rollback),蚂蚁金服后在Fescar 0.4.0 版本中贡献了 TCC 模式。后来更名为 Seata,意为:Simple Extensible Autonomous Transaction Architecture,是一套一站式分布式事务解决方案。 Seata三大基本组件: Transaction Coordinator (TC): 事务协调器,维护全局事务的运行状态,负责协调并驱动全局事务的提交或回滚。 Transaction Manager (TM): 控制全局事务的边界,负责开启一个全局事务,并最终发起全局提交或全局回滚的决议。 Resource Manager (RM): 控制分支事务,负责分支注册、状态汇报,并接收事务协调器的指令,驱动分支(本地)事务的提交和回滚。 Seata官方调用流程图: upload successful 二、Fescar相比XA二阶段优缺点:优点: 基于SQL解析实现了自动补偿,降低业务侵入性。 第一阶段就本地事务就提交了 ,二阶段commit是异步操作相对XA两段全部持有资源更高效。 Fescar提供了两种模式,AT和MT。在AT模式下事务资源可以是任何支持ACID的数据库,在MT模式下事务资源没有限制,可以是缓存,可以是文件,可以是其他的等等。当然这两个模式也可以混用。 global lock全局锁实现了写隔离与读隔离。 Undolog日志自动清理 缺点: 代码入侵性体现为配置Fescar的数据代理和加个注解,每个业务库都需要一个Undolog表。 从调用图中开源看出性能损耗有:一条Update的SQL,获取全局事务xid(TC通讯)、before image(查询)、after image(查询)、insert undo log(Undolog表的blob字段数据量可不小)、before commit(TC通讯,判断锁冲突);为了自动补偿在Undolog表花了不小开销,而且触发概率比较低。 二阶段commit也是需要占用系统资源。 二阶段回滚需要删除各节点的Undolog才能释放全局锁。 三、实验 本次实验使用的是官方提供的springcloud-eureka-feign-mybatis-seata工程,模拟远程调用超时异常;通过localhost:8180/order/create?userId=1&productId=1&count=10&money=100触发流程,order本地创建订单调用,远程storage扣减库存,远程扣减账户余额时候模拟该超时异常。下面展示下异常情况下日志信息: OrderServerApplication日志展示了事务增强拦截器GlobalTransactionalInterceptor12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970i.seata.tm.api.DefaultGlobalTransaction : Begin new global transaction [192.168.3.2:8091:2044579200] i.seata.sample.service.OrderServiceImpl : ------->交易开始 
i.seata.sample.service.OrderServiceImpl : ------->扣减账户开始order中 i.s.core.rpc.netty.RmMessageListener : onMessage:xid=192.168.3.2:8091:2044579200,branchId=2044579202,branchType=AT,resourceId=jdbc:mysql://127.0.0.1/seat-order,applicationData=null io.seata.rm.AbstractRMHandler : Branch Rollbacking: 192.168.3.2:8091:2044579200 2044579202 jdbc:mysql://127.0.0.1/seat-order i.s.r.d.undo.AbstractUndoLogManager : xid 192.168.3.2:8091:2044579200 branch 2044579202, undo_log deleted with GlobalFinished io.seata.rm.AbstractRMHandler : Branch Rollbacked result: PhaseTwo_Rollbacked i.seata.tm.api.DefaultGlobalTransaction : [192.168.3.2:8091:2044579200] rollback status: Rollbacked o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is feign.RetryableException: Read timed out executing GET http://account-server/account/decrease?userId=1&money=100] with root causejava.net.SocketTimeoutException: Read timed out at java.net.SocketInputStream.socketRead0(Native Method) ~[na:1.8.0_231] at java.net.SocketInputStream.socketRead(SocketInputStream.java:116) ~[na:1.8.0_231] at java.net.SocketInputStream.read(SocketInputStream.java:171) ~[na:1.8.0_231] at java.net.SocketInputStream.read(SocketInputStream.java:141) ~[na:1.8.0_231] at java.io.BufferedInputStream.fill(BufferedInputStream.java:246) ~[na:1.8.0_231] at java.io.BufferedInputStream.read1(BufferedInputStream.java:286) ~[na:1.8.0_231] at java.io.BufferedInputStream.read(BufferedInputStream.java:345) ~[na:1.8.0_231] at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:735) ~[na:1.8.0_231] at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:678) ~[na:1.8.0_231] at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1593) ~[na:1.8.0_231] at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1498) ~[na:1.8.0_231] at 
java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:480) ~[na:1.8.0_231] at feign.Client$Default.convertResponse(Client.java:143) ~[feign-core-10.2.3.jar:na] at feign.Client$Default.execute(Client.java:68) ~[feign-core-10.2.3.jar:na] at com.alibaba.cloud.seata.feign.SeataFeignClient.execute(SeataFeignClient.java:57) ~[spring-cloud-alibaba-seata-2.1.0.RELEASE.jar:2.1.0.RELEASE] at org.springframework.cloud.openfeign.ribbon.FeignLoadBalancer.execute(FeignLoadBalancer.java:93) ~[spring-cloud-openfeign-core-2.1.2.RELEASE.jar:2.1.2.RELEASE] at org.springframework.cloud.openfeign.ribbon.FeignLoadBalancer.execute(FeignLoadBalancer.java:56) ~[spring-cloud-openfeign-core-2.1.2.RELEASE.jar:2.1.2.RELEASE] at com.netflix.client.AbstractLoadBalancerAwareClient$1.call(AbstractLoadBalancerAwareClient.java:104) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at com.netflix.loadbalancer.reactive.LoadBalancerCommand$3$1.call(LoadBalancerCommand.java:303) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at com.netflix.loadbalancer.reactive.LoadBalancerCommand$3$1.call(LoadBalancerCommand.java:287) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at rx.internal.util.ScalarSynchronousObservable$3.call(ScalarSynchronousObservable.java:231) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.util.ScalarSynchronousObservable$3.call(ScalarSynchronousObservable.java:228) ~[rxjava-1.3.8.jar:1.3.8] at rx.Observable.unsafeSubscribe(Observable.java:10327) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeConcatMap$ConcatMapSubscriber.drain(OnSubscribeConcatMap.java:286) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeConcatMap$ConcatMapSubscriber.onNext(OnSubscribeConcatMap.java:144) ~[rxjava-1.3.8.jar:1.3.8] at com.netflix.loadbalancer.reactive.LoadBalancerCommand$1.call(LoadBalancerCommand.java:185) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at com.netflix.loadbalancer.reactive.LoadBalancerCommand$1.call(LoadBalancerCommand.java:180) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at 
rx.Observable.unsafeSubscribe(Observable.java:10327) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeConcatMap.call(OnSubscribeConcatMap.java:94) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeConcatMap.call(OnSubscribeConcatMap.java:42) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) ~[rxjava-1.3.8.jar:1.3.8] at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) ~[rxjava-1.3.8.jar:1.3.8] at rx.Observable.subscribe(Observable.java:10423) ~[rxjava-1.3.8.jar:1.3.8] at rx.Observable.subscribe(Observable.java:10390) ~[rxjava-1.3.8.jar:1.3.8] at rx.observables.BlockingObservable.blockForSingle(BlockingObservable.java:443) ~[rxjava-1.3.8.jar:1.3.8] at rx.observables.BlockingObservable.single(BlockingObservable.java:340) ~[rxjava-1.3.8.jar:1.3.8] at com.netflix.client.AbstractLoadBalancerAwareClient.executeWithLoadBalancer(AbstractLoadBalancerAwareClient.java:112) ~[ribbon-loadbalancer-2.3.0.jar:2.3.0] at org.springframework.cloud.openfeign.ribbon.LoadBalancerFeignClient.execute(LoadBalancerFeignClient.java:83) ~[spring-cloud-openfeign-core-2.1.2.RELEASE.jar:2.1.2.RELEASE] at com.alibaba.cloud.seata.feign.SeataLoadBalancerFeignClient.execute(SeataLoadBalancerFeignClient.java:56) ~[spring-cloud-alibaba-seata-2.1.0.RELEASE.jar:2.1.0.RELEASE] at feign.SynchronousMethodHandler.executeAndDecode(SynchronousMethodHandler.java:108) ~[feign-core-10.2.3.jar:na] at feign.SynchronousMethodHandler.invoke(SynchronousMethodHandler.java:78) ~[feign-core-10.2.3.jar:na] at feign.ReflectiveFeign$FeignInvocationHandler.invoke(ReflectiveFeign.java:103) ~[feign-core-10.2.3.jar:na] at com.sun.proxy.$Proxy111.decrease(Unknown Source) ~[na:na] at 
io.seata.sample.service.OrderServiceImpl.create(OrderServiceImpl.java:50) ~[classes/:na] at io.seata.sample.service.OrderServiceImpl$$FastClassBySpringCGLIB$$3d2d368a.invoke(<generated>) ~[classes/:na] at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) ~[spring-core-5.1.9.RELEASE.jar:5.1.9.RELEASE] at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:749) ~[spring-aop-5.1.9.RELEASE.jar:5.1.9.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) ~[spring-aop-5.1.9.RELEASE.jar:5.1.9.RELEASE] at io.seata.spring.annotation.GlobalTransactionalInterceptor$1.execute(GlobalTransactionalInterceptor.java:109) ~[seata-all-1.2.0.jar:1.2.0] at io.seata.tm.api.TransactionalTemplate.execute(TransactionalTemplate.java:104) ~[seata-all-1.2.0.jar:1.2.0] at io.seata.spring.annotation.GlobalTransactionalInterceptor.handleGlobalTransaction(GlobalTransactionalInterceptor.java:106) ~[seata-all-1.2.0.jar:1.2.0] at io.seata.spring.annotation.GlobalTransactionalInterceptor.invoke(GlobalTransactionalInterceptor.java:83) ~[seata-all-1.2.0.jar:1.2.0] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.1.9.RELEASE.jar:5.1.9.RELEASE] at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:688) ~[spring-aop-5.1.9.RELEASE.jar:5.1.9.RELEASE] at io.seata.sample.service.OrderServiceImpl$$EnhancerBySpringCGLIB$$9c1f4d2e.create(<generated>) ~[classes/:na] at io.seata.sample.controller.OrderController.create(OrderController.java:29) ~[classes/:na]........省略异常 StorageServerApplication日志展示事务分支Branch Rollbacked123456i.s.sample.service.StorageServiceImpl : ------->扣减库存开始i.s.sample.service.StorageServiceImpl : ------->扣减库存结束c.a.c.seata.web.SeataHandlerInterceptor : xid in change during RPC from 192.168.3.2:8091:2044579200 to 
nulli.s.core.rpc.netty.RmMessageListener : onMessage:xid=192.168.3.2:8091:2044579200,branchId=2044579204,branchType=AT,resourceId=jdbc:mysql://127.0.0.1/seat-storage,applicationData=nullio.seata.rm.AbstractRMHandler : Branch Rollbacking: 192.168.3.2:8091:2044579200 2044579204 jdbc:mysql://127.0.0.1/seat-storagei.s.r.d.undo.AbstractUndoLogManager : xid 192.168.3.2:8091:2044579200 branch 2044579204, undo_log deleted with GlobalFinished : Branch Rollbacked result: PhaseTwo_Rollbacked AccountServerApplication日志出现sql exception1234567891011121314i.s.sample.service.AccountServiceImpl : ------->扣减账户开始account中i.s.r.d.exec.AbstractDMLBaseExecutor : execute executeAutoCommitTrue error:io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]java.sql.SQLException: io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]...省略一些重要异常堆栈信息2020-05-30 23:31:56.652 WARN 5960 --- [nio-8181-exec-2] c.a.c.seata.web.SeataHandlerInterceptor : xid in change during RPC from 192.168.3.2:8091:2044579200 to null2020-05-30 23:31:56.654 ERROR 5960 --- [nio-8181-exec-2] o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is org.springframework.jdbc.UncategorizedSQLException: ### Error updating database. Cause: java.sql.SQLException: io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]### The error may exist in file [E:\\document\\GitHub\\seata-samples-master\\springcloud-eureka-feign-mybatis-seata\\account-server\\target\\classes\\mapper\\AccountMapper.xml]### The error may involve defaultParameterMap### The error occurred while setting parameters### SQL: UPDATE account SET residue = residue - ?,used = used + ? 
where user_id = ?;### Cause: java.sql.SQLException: io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]; uncategorized SQLException; SQL state [null]; error code [0]; io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]; nested exception is java.sql.SQLException: io.seata.core.exception.RmTransactionException: Response[ TransactionException[192.168.3.2:8091:2044579200] ]] with root cause 四、分布式事务公共模块1、创建工程common_fescar,引入依赖123456789101112131415<properties> <fescar.version>0.4.2</fescar.version></properties><dependencies> <dependency> <groupId>com.alibaba.fescar</groupId> <artifactId>fescar-tm</artifactId> <version>${fescar.version}</version> </dependency> <dependency> <groupId>com.alibaba.fescar</groupId> <artifactId>fescar-spring</artifactId> <version>${fescar.version}</version> </dependency></dependencies> 2、将fescar配置文件拷贝到resources工程下 3、资源提供者每个线程绑定一个XID123456789101112131415161718192021222324252627282930313233public class FescarRMRequestFilter extends OncePerRequestFilter { private static final Logger LOGGER = org.slf4j.LoggerFactory.getLogger( FescarRMRequestFilter.class); /** * 给每次线程请求绑定一个XID * @param request * @param response * @param filterChain */ @Override protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException { String currentXID = request.getHeader( FescarAutoConfiguration.FESCAR_XID); if(!StringUtils.isEmpty(currentXID)){ RootContext.bind(currentXID); LOGGER.info(\"当前线程绑定的XID :\" + currentXID); } try{ filterChain.doFilter(request, response); } finally { String unbindXID = RootContext.unbind(); if(unbindXID != null){ LOGGER.info(\"当前线程从指定XID中解绑 XID :\" + unbindXID); if(!currentXID.equals(unbindXID)){ LOGGER.info(\"当前线程的XID发生变更\"); } } if(currentXID != null){ LOGGER.info(\"当前线程的XID发生变更\"); } } }} 
4、RestInterceptor过滤器,每次请求都将XID转发到其他微服务1234567891011121314151617181920public class FescarRestInterceptor implements RequestInterceptor, ClientHttpRequestInterceptor { @Override public void apply(RequestTemplate requestTemplate) { String xid = RootContext.getXID(); if(!StringUtils.isEmpty(xid)){ requestTemplate.header( FescarAutoConfiguration.FESCAR_XID, xid); } } @Override public ClientHttpResponse intercept(HttpRequest request, byte[] body, ClientHttpRequestExecution execution) throws IOException { String xid = RootContext.getXID(); if(!StringUtils.isEmpty(xid)){ HttpHeaders headers = request.getHeaders(); headers.put( FescarAutoConfiguration.FESCAR_XID, Collections.singletonList(xid)); } return execution.execute(request, body); }} 5、创建FescarAutoConfiguration类12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576/** * * 创建数据源 * * 定义全局事务管理器扫描对象 * * 给所有RestTemplate添加头信息防止微服务之间调用问题 */@Configurationpublic class FescarAutoConfiguration { public static final String FESCAR_XID = \"fescarXID\"; /*** * 创建代理数据库 * @param environment * @return */ @Bean public DataSource dataSource(Environment environment){ DruidDataSource dataSource = new DruidDataSource(); dataSource.setUrl(environment.getProperty(\"spring.datasource.url\")); try { dataSource.setDriver(DriverManager.getDriver(environment.getProperty(\"spring.datasource.url\"))); } catch (SQLException e) { throw new RuntimeException(\"can't recognize dataSource Driver\"); } dataSource.setUsername(environment.getProperty(\"spring.datasource.username\")); dataSource.setPassword(environment.getProperty(\"spring.datasource.password\")); return new DataSourceProxy(dataSource); } /*** * 全局事务扫描器 * 用来解析带有@GlobalTransactional注解的方法,然后采用AOP的机制控制事务 * @param environment * @return */ @Bean public GlobalTransactionScanner globalTransactionScanner(Environment environment){ String applicationName = environment.getProperty(\"spring.application.name\"); 
String groupName = environment.getProperty(\"fescar.group.name\"); if(applicationName == null){ return new GlobalTransactionScanner(groupName == null ? \"my_test_tx_group\" : groupName); }else{ return new GlobalTransactionScanner(applicationName, groupName == null ? \"my_test_tx_group\" : groupName); } } /*** * 每次微服务和微服务之间相互调用 * 要想控制全局事务,每次TM都会请求TC生成一个XID,每次执行下一个事务,也就是调用其他微服务的时候都需要将该XID传递过去 * 所以我们可以每次请求的时候,都获取头中的XID,并将XID传递到下一个微服务 * @param restTemplates * @return */ @ConditionalOnBean({RestTemplate.class}) @Bean public Object addFescarInterceptor(Collection<RestTemplate> restTemplates){ restTemplates.stream() .forEach(restTemplate -> { List<ClientHttpRequestInterceptor> interceptors = restTemplate.getInterceptors(); if(interceptors != null){ interceptors.add(fescarRestInterceptor()); } }); return new Object(); } @Bean public FescarRMRequestFilter fescarRMRequestFilter(){ return new FescarRMRequestFilter(); } @Bean public FescarRestInterceptor fescarRestInterceptor(){ return new FescarRestInterceptor(); }} 6、记得将涉及到分布式事务的每个数据库都新建一个Undolog表五、参考资料 官方实验Demo集合 本次博客实验所用Demo 分布式事务框架Fescar在SpringCloud环境下的应用实践","tags":[{"name":"SpringCloud","slug":"SpringCloud","permalink":"https://caochikai.github.io/tags/SpringCloud/"}]},{"title":"TX-LCN分布式事务实践","date":"2020-05-30T10:11:00.000Z","path":"2020/05/30/TX-LCN分布式事务实践/","text":"TX-LCN分布式事务实践一、需求 扣减库存服务和生成订单服务对应不同数据库,Spring本地事务@Transactional并不能解决跨库跨服务保证数据一致性。分布式事务一般包含事务的发起者和参与者、关系型数据库资源服务以及事务管理器;Distributed Transaction Framework流行的主要有TX-LCN和阿里的seata框架,下一篇会调研下seata框架。 TX-LCN分布式事务框架是一款开源分布式事务框架,由两大模块组成TxClient和TxManager,TxClient扮演发起者和参与者,TxManager扮演事务管理器协调事务;从开发角度讲,TxClient指的是是我们自己的服务系统,TxManager是事务中心的协调系统;从Github RELEASE版本发布看,最新是5.0.2.RELEASE(支持LCN TXC TCC 三种事务模式),项目为了稳定使用v4.1.0(默认只支持LCN模式);LCN模式基本原理是代理切面拦截所有数据库链接的提交和回滚,由代理连接对象控制本地事务的真正的提交、回滚和释放。若是存在与非关系型数据库redis,就需要TCC模式补偿操作,来保证非关系redis和关系mysql整体一致性。 二、开始准备1、准备mysql和redis环境,通过spring initializr快速准备eureka注册中心。2、下载v4.1.0版本,tx-lcn-4.1.0是spring 
boot项目需要eureka注册中心服务和redis,运行启动类com.codingapi.tm.TxManagerApplication。3、访问http://127.0.0.1:8899/TxManager管理界面,注意两个属性负载均衡服务器地址的端口和当前连接数,这是实验成功的截图,一开始当前连接数应该是0。 upload successful 4、springcloud LCN分布式事务v4.0 示例demo 根据教程引导创建相应的数据库和修改配置,重点标注重要配置。我们重点关系jdbc版本的springcloud-jdbc-demo,它涉及到了5个业务模块,工程从1到5对应端口port:8081到8085,控制器的接口前缀是localhost:port/demo,分list列表接口和save接口;在save方法中,在demo3(调用4和5,自己)和demo1(调用2和3,自己)是事务发起方,两者差别是demo3注释了异常能正常返回insert 3条数据,而demo1打开异常触发分布式事务回滚insert数据;demo2、demo4和demo5仅仅是事务参与方。 12345678910111213141516171819202122232425262728feign.hystrix.enabled=falsespring.datasource.driver-class-name = com.mysql.jdbc.Driverspring.datasource.url= jdbc:mysql://localhost:3306/testspring.datasource.username= rootspring.datasource.password=rootspring.datasource.initialize = trueinit-db= truespring.application.name = demo3server.port = 8083#${random.int[9000,9999]},注册中心端口要对应eureka.client.service-url.defaultZone=http://127.0.0.1:8761/eureka/feign.hystrix.enabled=true# 关于**springcloud-hystrix机制,选择信号量隔离** http://www.jianshu.com/p/b8d21248c9b1hystrix.command.default.execution.isolation.strategy= SEMAPHOREhystrix.command.default.execution.isolation.thread.timeoutInMilliseconds=5000#Ribbon的负载均衡策略,重试次数为0ribbon.NFLoadBalancerRuleClassName=com.netflix.loadbalancer.RandomRuleribbon.MaxAutoRetriesNextServer=0#**txmanager地址端口指的是TxManager管理界面的负载均衡服务器地址的端口**tm.manager.url=http://127.0.0.1:8899/tx/manager/logging.level.com.codingapi=debug 以demo1的DemoServiceImpl异常触发分布式为例,重点是@TxTransaction(isStart = true)标注事务发起方,否则ThreadLocal不会有groupid,那就不会有事务组,更不可能实现回滚事务。 未标记发起方出现异常则groupId为空情况:12342020-05-30 17:30:06.387 DEBUG 4964 --- [nio-8084-exec-8] c.c.t.s.interceptor.TransactionAspect : annotation-TransactionRunning-start---->2020-05-30 17:30:06.387 DEBUG 4964 --- [nio-8084-exec-8] c.c.t.a.s.impl.AspectBeforeServiceImpl : around--> groupId-> null,txTransactionLocal->null2020-05-30 17:30:06.387 DEBUG 4964 --- [nio-8084-exec-8] c.c.t.d.aspect.DataSourceAspect : getConnection-start---->2020-05-30 
17:30:06.387 DEBUG 4964 --- [nio-8084-exec-8] c.c.tx.datasource.AbstractResourceProxy : loadConnection -> null ! DemoServiceImpl1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950package com.example.demo.service.impl;import com.example.demo.client.Demo2Client;import com.example.demo.client.Demo3Client;import com.example.demo.dao.TestDao;import com.example.demo.entity.Test;import com.example.demo.service.DemoService;import com.codingapi.tx.annotation.TxTransaction;import org.springframework.beans.factory.annotation.Autowired;import org.springframework.stereotype.Service;import org.springframework.transaction.annotation.Transactional;import java.util.List;/** * Created by lorne on 2017/6/26. */@Servicepublic class DemoServiceImpl implements DemoService { @Autowired private TestDao testDao; @Autowired private Demo2Client demo2Client; @Autowired private Demo3Client demo3Client; @Override public List<Test> list() { return testDao.list(); } @Override **@TxTransaction(isStart = true)** @Transactional public int save() { int rs2 = demo2Client.save(); int rs3 = demo3Client.save(); int rs1 = testDao.save(); int v = 100/0; return rs1+rs2+rs3; }} 访问demo1的save接口1234567//访问save接口:http://localhost:8081/demo/save,触发异常Whitelabel Error PageThis application has no explicit mapping for /error, so you are seeing this as a fallback.Sat May 30 17:33:11 CST 2020There was an unexpected error (type=Internal Server Error, status=500).Demo3Client#save() failed and fallback failed. 
触发回滚JdbcDemo2Application控制台正确日志:123456789101112#触发回滚2020-05-30 17:33:11.654 DEBUG 4468 --- [ntLoopGroup-2-1] c.c.tx.netty.handler.TransactionHandler : TxManager-response->{\"a\":\"t\",\"c\":0,\"t\":\"9Fxhh19M\",\"k\":\"62kQVGPh\"}2020-05-30 17:33:11.654 INFO 4468 --- [ool-1-thread-19] c.c.t.c.service.impl.ActionTServiceImpl : accept notify data ->{\"a\":\"t\",\"c\":0,\"t\":\"9Fxhh19M\",\"k\":\"62kQVGPh\"}lcn transaction over, res -> groupId:3OBwlhvN and state is rollback2020-05-30 17:33:11.657 DEBUG 4468 --- [ Thread-28] c.c.t.d.relational.LCNDBConnection : lcnConnection closed groupId:3OBwlhvN2020-05-30 17:33:11.658 INFO 4468 --- [ool-1-thread-19] c.c.t.c.service.impl.ActionTServiceImpl : accept notify response res ->12020-05-30 17:33:11.658 DEBUG 4468 --- [ool-1-thread-19] .c.t.c.s.i.TransactionControlServiceImpl : send notify data ->{\"p\":{\"d\":\"1\"},\"a\":\"t\",\"k\":\"62kQVGPh\"}2020-05-30 17:33:11.659 DEBUG 4468 --- [ntLoopGroup-2-1] c.c.tx.netty.handler.TransactionHandler : TxManager-response->{\"d\":\"\",\"k\":\"62kQVGPh\"}#clent和manager的心跳数据2020-05-30 17:33:26.659 DEBUG 4468 --- [ntLoopGroup-2-1] c.c.tx.netty.handler.TransactionHandler : hart data --->{\"p\":\"{}\",\"a\":\"h\",\"k\":\"h\"}2020-05-30 17:33:26.659 DEBUG 4468 --- [ntLoopGroup-2-1] c.c.tx.netty.handler.TransactionHandler : TxManager-response->{\"d\":\"5\",\"k\":\"h\"} 三、4.0与5.0版本差别 5.0版本从1月份开始大量提交,并已经交由codingApi团队开发维护,两个版本的注解源码是不同的。 12345678910111213141516171819202122232425262728/** **4.0版本** * Created by lorne on 2017/6/26. */@Target({ElementType.METHOD, ElementType.TYPE})@Retention(RetentionPolicy.RUNTIME)@Inherited@Documentedpublic @interface TxTransaction { /** * 是否LCN事务发起方 * @return true 是:是发起方 false 否:是参与方 */ boolean isStart() default false; /** * 回滚异常 * @return */ Class<? extends Throwable>[] rollbackFor() default {}; /** * 不回滚异常 * @return */ Class<? 
extends Throwable>[] noRollbackFor() default {};} 12345678910111213141516171819202122232425/****5.0版本** * Created by lorne on 2017/6/26. */@Target({ElementType.METHOD, ElementType.TYPE})@Retention(RetentionPolicy.RUNTIME)@Inherited@Documentedpublic @interface TxTransaction { /** * 事务模式 transaction type * * @return lcn, tcc, txc * @see Transactions */ String type() default Transactions.LCN; /** * 分布式事务传播行为 * * @return 传播行为 * @see DTXPropagation */ DTXPropagation propagation() default DTXPropagation.REQUIRED;} 四、参考资料 分布式事务从0到1-认识分布式事务 codingapi/tx-lcn springcloud LCN分布式事务v4.0 示例demo","tags":[{"name":"SpringCloud","slug":"SpringCloud","permalink":"https://caochikai.github.io/tags/SpringCloud/"}]},{"title":"SpringBoot canal数据同步解决方案","date":"2020-05-28T14:31:00.000Z","path":"2020/05/28/SpringBoot-canal数据同步解决方案/","text":"SpringBoot canal数据同步解决方案一、需求 微服务多数据库情况下可以使用canal替代触发器,canal是应阿里巴巴跨机房同步的业务需求而提出的,canal基于数据库的日志解析,获取变更进行增量订阅&消费的业务。无论是canal实验需要还是为了增量备份、主从复制和恢复,都是需要开启mysql-binlog日志,数据目录设置到不同的磁盘分区可以降低io等待。 canal 工作原理 canal 模拟 MySQL slave 的交互协议,伪装自己为 MySQL slave ,向 MySQL master 发送dump 协议 MySQL master 收到 dump 请求,开始推送 binary log 给 slave (即 canal ) canal 解析 binary log 对象(原始为 byte 流) 二、部署环境1、登录mysql查看是否开启binlog,标红的log_bin默认是OFF关12345678910111213141516171819202122232425mysql> show variables like 'log_%';+----------------------------------------+-------------------------------------------------------+| Variable_name | Value |+----------------------------------------+-------------------------------------------------------+| **log_bin | OFF** || log_bin_basename | || log_bin_index | || log_bin_trust_function_creators | OFF || log_bin_use_v1_row_events | OFF || log_builtin_as_identified_by_password | OFF || log_error | F:\\tools\\mysql-5.7.28-winx64\\Data\\DESKTOP-C1LU9IQ.err || log_error_verbosity | 3 || log_output | FILE || log_queries_not_using_indexes | OFF || log_slave_updates | OFF || log_slow_admin_statements | OFF || log_slow_slave_statements | OFF || 
log_statements_unsafe_for_binlog | ON || log_syslog | ON || log_syslog_tag | || log_throttle_queries_not_using_indexes | 0 || log_timestamps | UTC || log_warnings | 2 |+----------------------------------------+-------------------------------------------------------+19 rows in set (0.03 sec) 2、编辑配置文件1234567891011121314151617181920212223242526272829303132333435[mysqld]# 设置3306端口port=3306# 设置mysql的安装目录,按照个人的实际需要改basedir=F:\\\\tools\\\\mysql-5.7.28-winx64 # 切记此处一定要用双斜杠\\\\,单斜杠我这里会出错,不过看别人的教程,有的是单斜杠。自己尝试吧# 设置mysql数据库的数据的存放目录datadir=F:\\\\tools\\\\mysql-5.7.28-winx64\\\\Data # 此处同上# 允许最大连接数max_connections=200# 允许连接失败的次数。这是为了防止有人从该主机试图攻击数据库系统max_connect_errors=10# 服务端使用的字符集默认为UTF8character-set-server=utf8# 创建新表时将使用的默认存储引擎default-storage-engine=INNODB# 默认使用“mysql_native_password”插件认证default_authentication_plugin=mysql_native_passwordlower_case_table_names=2sql_mode = STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTIONmax_connections=1000#实验重点配置 # 开启 binloglog-bin=mysql-bin# 选择 ROW 模式binlog-format=ROW # 配置 MySQL replaction 需要定义,不要和 canal 的 slaveId 重复server_id=1 [mysql]# 设置mysql客户端默认字符集default-character-set=utf8[client]# 设置mysql客户端连接服务端时默认使用的端口port=3306default-character-set=utf8 3、创建MySQL slave 的权限canal账户并且进行远程连接授权1234CREATE USER canal IDENTIFIED BY 'canal'; GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';-- GRANT ALL PRIVILEGES ON *.* TO 'canal'@'%' ;FLUSH PRIVILEGES; 4、记得重启mysql服务12345Linux:systemctl restart mysqldWindow:net stop mysql;net start mysql; 三、canal快速部署配置1、修改配置conf/example/instance.properties123456789101112131415161718## mysql serverIdcanal.instance.mysql.slaveId = 1234#position info,需要改成自己的数据库信息canal.instance.master.address = 127.0.0.1:3306 canal.instance.master.journal.name = canal.instance.master.position = canal.instance.master.timestamp = #canal.instance.standby.address = #canal.instance.standby.journal.name =#canal.instance.standby.position = 
#canal.instance.standby.timestamp = #username/password,需要改成自己的数据库信息canal.instance.dbUsername = canal canal.instance.dbPassword = canalcanal.instance.defaultDatabaseName =canal.instance.connectionCharset = UTF-8#table regexcanal.instance.filter.regex = .\\*\\\\\\\\..\\* 2、通过启动脚本运行:sh bin/startup.sh3、查看 server 日志和instance 的日志1234567891011121314151617$ tail -f logs/canal/canal.log2020-05-28 13:52:03.037 [main] INFO com.alibaba.otter.canal.deployer.CanalLauncher - ## set default uncaught exception handler2020-05-28 13:52:03.065 [main] INFO com.alibaba.otter.canal.deployer.CanalLauncher - ## load canal configurations2020-05-28 13:52:03.072 [main] INFO com.alibaba.otter.canal.deployer.CanalStarter - ## start the canal server.2020-05-28 13:52:03.444 [main] INFO com.alibaba.otter.canal.deployer.CanalController - ## start the canal server[172.36.58.25(172.36.58.25):11111]2020-05-28 13:52:04.604 [main] INFO com.alibaba.otter.canal.deployer.CanalStarter - ## the canal server is running now ......$ tail -f logs/example/example.log2020-05-28 13:52:04.238 [main] WARN o.s.beans.GenericTypeAwarePropertyDescriptor - Invalid JavaBean property 'connectionCharset' being accessed! 
Ambiguous write methods found next to actually used [public void com.alibaba.otter.canal.parse.inbound.mysql.AbstractMysqlEventParser.setConnectionCharset(java.lang.String)]: [public void com.alibaba.otter.canal.parse.inbound.mysql.AbstractMysqlEventParser.setConnectionCharset(java.nio.charset.Charset)]2020-05-28 13:52:04.264 [main] INFO c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [canal.properties]2020-05-28 13:52:04.265 [main] INFO c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [example/instance.properties]2020-05-28 13:52:04.568 [main] INFO c.a.otter.canal.instance.spring.CanalInstanceWithSpring - start CannalInstance for 1-example2020-05-28 13:52:04.572 [main] WARN c.a.o.canal.parse.inbound.mysql.dbsync.LogEventConvert - --> init table filter : ^.*\\..*$2020-05-28 13:52:04.573 [main] WARN c.a.o.canal.parse.inbound.mysql.dbsync.LogEventConvert - --> init table black filter :2020-05-28 13:52:04.577 [main] INFO c.a.otter.canal.instance.core.AbstractCanalInstance - start successful....2020-05-28 13:52:04.616 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - ---> begin to find start position, it will be long time for reset or first position2020-05-28 13:52:04.616 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - prepare to find start position just show master status2020-05-28 13:52:06.556 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - ---> find start position successfully, EntryPosition[included=false,journalName=mysql-bin.000001,position=4,serverId=1,gtid=<null>,timestamp=1590644973000] cost : 1935ms , the next step is binlog dump 四、初步监听实验12345<dependency> <groupId>com.alibaba.otter</groupId> 
<artifactId>canal.client</artifactId> <version>1.1.0</version> </dependency> 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495import java.net.InetSocketAddress;import java.util.List;import com.alibaba.otter.canal.client.CanalConnectors;import com.alibaba.otter.canal.client.CanalConnector;import com.alibaba.otter.canal.common.utils.AddressUtils;import com.alibaba.otter.canal.protocol.Message;import com.alibaba.otter.canal.protocol.CanalEntry.Column;import com.alibaba.otter.canal.protocol.CanalEntry.Entry;import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;import com.alibaba.otter.canal.protocol.CanalEntry.EventType;import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;import com.alibaba.otter.canal.protocol.CanalEntry.RowData;public class SimpleCanalClientExample { public static void main(String args[]) { // 创建链接 CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress(AddressUtils.getHostIp(), 11111), \"example\", \"\", \"\"); int batchSize = 1000; int emptyCount = 0; try { connector.connect(); connector.subscribe(\".*\\\\..*\"); connector.rollback(); int totalEmptyCount = 120; while (emptyCount < totalEmptyCount) { Message message = connector.getWithoutAck(batchSize); // 获取指定数量的数据 long batchId = message.getId(); int size = message.getEntries().size(); if (batchId == -1 || size == 0) { emptyCount++; System.out.println(\"empty count : \" + emptyCount); try { Thread.sleep(1000); } catch (InterruptedException e) { } } else { emptyCount = 0; // System.out.printf(\"message[batchId=%s,size=%s] \\n\", batchId, size); printEntry(message.getEntries()); } connector.ack(batchId); // 提交确认 // connector.rollback(batchId); // 处理失败, 回滚数据 } System.out.println(\"empty too many times, exit\"); } finally { connector.disconnect(); } } private static void printEntry(List<Entry> entrys) { for (Entry entry : entrys) 
{ if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) { continue; } RowChange rowChage = null; try { rowChage = RowChange.parseFrom(entry.getStoreValue()); } catch (Exception e) { throw new RuntimeException(\"ERROR ## parser of eromanga-event has an error , data:\" + entry.toString(), e); } EventType eventType = rowChage.getEventType(); System.out.println(String.format(\"================&gt; binlog[%s:%s] , name[%s,%s] , eventType : %s\", entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(), entry.getHeader().getSchemaName(), entry.getHeader().getTableName(), eventType)); for (RowData rowData : rowChage.getRowDatasList()) { if (eventType == EventType.DELETE) { printColumn(rowData.getBeforeColumnsList()); } else if (eventType == EventType.INSERT) { printColumn(rowData.getAfterColumnsList()); } else { System.out.println(\"-------&gt; before\"); printColumn(rowData.getBeforeColumnsList()); System.out.println(\"-------&gt; after\"); printColumn(rowData.getAfterColumnsList()); } } } } private static void printColumn(List<Column> columns) { for (Column column : columns) { System.out.println(column.getName() + \" : \" + column.getValue() + \" update=\" + column.getUpdated()); } }} 随便插入数据触发1INSERT INTO `demo`.`tb_ad`(`id`, `url`, `status`, `position`, `image`, `start_time`, `end_time`) VALUES (1, 'https://www.baidu.com/', '1', 'web_index_lb', 'https://pics1.baidu.com/feed/c83d70cf3bc79f3d5c30d358deb67a17738b29a6.jpeg?https://kins.oss-cn-shenzhen.aliyuncs.com/yhzb/2020-03-11/ca21b3b17d6f4757b991dd86b8cef3fa-VIP-680.jpeg', '2020-05-22 10:58:08', '2021-06-01 10:58:14'); 从控制台中看到12345678910111213empty count : 66empty count : 67empty count : 68empty count : 69empty count : 70================&gt; binlog[mysql-bin.000001:355] , name[demo,tb_ad] , eventType : INSERTid : 2 update=trueurl : https://www.baidu.com/ update=truestatus : 1 update=trueposition : web_index_lb update=trueimage : 
https://pics1.baidu.com/feed/c83d70cf3bc79f3d5c30d358deb67a17738b29a6.jpeg?https://kins.oss-cn-shenzhen.aliyuncs.com/yhzb/2020-03-11/ca21b3b17d6f4757b991dd86b8cef3fa-VIP-680.jpeg update=truestart_time : 2020-05-22 10:58:08 update=trueend_time : 2021-06-01 10:58:14 update=true 五、数据监控微服务1234567<!-- 第三方starter快速整合canal https://github.com/NormanGyllenhaal/canal-client--><!-- https://mvnrepository.com/artifact/top.javatool/canal-spring-boot-starter --><dependency> <groupId>top.javatool</groupId> <artifactId>canal-spring-boot-starter</artifactId> <version>1.2.1-RELEASE</version></dependency> 订阅数据库的增删改操作12345678910111213141516171819202122232425import org.slf4j.Logger;import org.slf4j.LoggerFactory;import org.springframework.stereotype.Component;import top.javatool.canal.client.annotation.CanalTable;import top.javatool.canal.client.handler.EntryHandler;@Component@CanalTable(value = \"t_user\")public class UserHandler implements EntryHandler<User> { private Logger logger = LoggerFactory.getLogger(UserHandler.class); public void insert(User user) { logger.info(\"insert message {}\", user); } public void update(User before, User after) { logger.info(\"update before {} \", before); logger.info(\"update after {}\", after); } public void delete(User user) { logger.info(\"delete {}\", user); }} 启动数据监控微服务,修改user表,观察控制台输出。12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849502020-05-28 16:23:22.667 INFO 24284 --- [l-client-thread] t.j.c.client.client.AbstractCanalClient : 获取消息 Message[id=23,entries=[header { version: 1 logfileName: \"mysql-bin.000001\" logfileOffset: 18380 serverId: 1 serverenCode: \"UTF-8\" executeTime: 1590654201000 sourceType: MYSQL schemaName: \"\" tableName: \"\" eventLength: 68}entryType: TRANSACTIONBEGINstoreValue: \" \\025\", header { version: 1 logfileName: \"mysql-bin.000001\" logfileOffset: 18505 serverId: 1 serverenCode: \"UTF-8\" executeTime: 1590654201000 sourceType: MYSQL schemaName: \"demo\" tableName: \"t_user\" 
eventLength: 88 eventType: UPDATE props { key: \"rowsCount\" value: \"1\" }}entryType: ROWDATAstoreValue: \"\\b\\210\\002\\020\\002P\\000b\\370\\003\\n\\033\\b\\000\\020\\004\\032\\002id \\001(\\0000\\000B\\00221R\\aint(11)\\n*\\b\\001\\020\\f\\032\\tuser_name \\000(\\0000\\000B\\005ZeldaR\\fvarchar(255)\\n*\\b\\002\\020\\372\\377\\377\\377\\377\\377\\377\\377\\377\\001\\032\\006gender \\000(\\0000\\000B\\0010R\\ntinyint(4)\\n\\\"\\b\\003\\020\\004\\032\\ncountry_id \\000(\\0000\\000B\\0011R\\aint(11)\\n&\\b\\004\\020[\\032\\bbirthday \\000(\\0000\\000B\\n1998-04-18R\\004date\\n7\\b\\005\\020]\\032\\vcreate_time \\000(\\0000\\000B\\0231991-01-10 05:45:50R\\ttimestamp\\022\\033\\b\\000\\020\\004\\032\\002id \\001(\\0000\\000B\\00221R\\aint(11)\\022.\\b\\001\\020\\f\\032\\tuser_name \\000(\\0010\\000B\\tZelda1111R\\fvarchar(255)\\022*\\b\\002\\020\\372\\377\\377\\377\\377\\377\\377\\377\\377\\001\\032\\006gender \\000(\\0000\\000B\\0010R\\ntinyint(4)\\022\\\"\\b\\003\\020\\004\\032\\ncountry_id \\000(\\0000\\000B\\0011R\\aint(11)\\022&\\b\\004\\020[\\032\\bbirthday \\000(\\0000\\000B\\n1998-04-18R\\004date\\0227\\b\\005\\020]\\032\\vcreate_time \\000(\\0000\\000B\\0231991-01-10 05:45:50R\\ttimestamp\", header { version: 1 logfileName: \"mysql-bin.000001\" logfileOffset: 18593 serverId: 1 serverenCode: \"UTF-8\" executeTime: 1590654201000 sourceType: MYSQL schemaName: \"\" tableName: \"\" eventLength: 31}entryType: TRANSACTIONENDstoreValue: \"\\022\\0041574\"],raw=false,rawEntries=[]]2020-05-28 16:23:22.668 INFO 24284 --- [xecute-thread-6] t.j.canal.example.handler.UserHandler : update before User{id=null, userName='Zelda', gender=null, countryId=null, birthday=null, createTime=null} 2020-05-28 16:23:22.668 INFO 24284 --- [xecute-thread-6] t.j.canal.example.handler.UserHandler : update after User{id=21, userName='Zelda1111', gender=0, countryId=1, birthday=Sat Apr 18 00:00:00 CST 1998, createTime=Thu Jan 10 05:45:50 CST 
1991}","tags":[{"name":"Springboot","slug":"Springboot","permalink":"https://caochikai.github.io/tags/Springboot/"}]},{"title":"Swagger关于令牌校验","date":"2020-05-27T14:56:00.000Z","path":"2020/05/27/Swagger关于令牌校验/","text":"Swagger关于令牌校验一、需求 微服务oauth2校验令牌,通过Swagger2的securitySchemes配置全局token参数,也可以创建一个默认测试用户默认启动时候登陆获取永久期限的令牌。 二、swagger配置实现1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374@Configuration@EnableSwagger2public class SwaggerConfig { /** * 创建一个Docket对象 * 调用select()方法, * 生成ApiSelectorBuilder对象实例,该对象负责定义外漏的API入口 * 通过使用RequestHandlerSelectors和PathSelectors来提供Predicate,在此我们使用any()方法,将所有API都通过Swagger进行文档管理 * * @return */ @Bean public Docket createRestApi() { return new Docket(DocumentationType.SWAGGER_2) .apiInfo(apiInfo()) .select() .apis(RequestHandlerSelectors.basePackage(\"com.xxx\")) .paths(PathSelectors.any()) .build() .securitySchemes(securitySchemes()) .securityContexts(securityContexts()) .globalOperationParameters(globalOperationParameters()); } private List<Parameter> globalOperationParameters(){ //添加默认head参数Authorization ParameterBuilder tokenPar = new ParameterBuilder(); List<Parameter> pars = new ArrayList<Parameter>(); tokenPar.name(\"Authorization\").defaultValue(\"Bearer \").description(\"令牌\").modelRef(new ModelRef(\"string\")).parameterType(\"header\").required(false).build(); pars.add(tokenPar.build()); return pars; } private List<ApiKey> securitySchemes() { List<ApiKey> apiKeyList = new ArrayList(); apiKeyList.add(new ApiKey(\"Authorization\", \"Authorization\", \"header\")); return apiKeyList; } private List<SecurityContext> securityContexts() { List<SecurityContext> securityContexts = new ArrayList<>();//通过PathSelectors.regex(\"^(?!auth).*$\"),排除包含\"auth\"的接口不需要使用securitySchemes securityContexts.add( SecurityContext.builder() .securityReferences(defaultAuth()) .forPaths(PathSelectors.regex(\"^(?!auth).*$\")) .build()); return securityContexts; } 
List<SecurityReference> defaultAuth() { AuthorizationScope authorizationScope = new AuthorizationScope(\"global\", \"accessEverything\"); AuthorizationScope[] authorizationScopes = new AuthorizationScope[1]; authorizationScopes[0] = authorizationScope; List<SecurityReference> securityReferences = new ArrayList<>(); securityReferences.add(new SecurityReference(\"Authorization\", authorizationScopes)); return securityReferences; } private ApiInfo apiInfo() { return new ApiInfoBuilder() //标题 .title(\"gold-mall-biz使用Swagger2构建RESTful APIs\") //简介 .description(\"\") //服务条款 .termsOfServiceUrl(\"\") //作者个人信息 .contact(new Contact(\"xxx\", \"\", \"xxx@163.com\")) //版本 .version(\"1.0\") .build(); }} 设置完成后进入SwaggerUI,右上角出现“Authorization”按钮,输入令牌对于除上文所述的包含auth的接口结果都会带上token。 upload successful","tags":[{"name":"swagger","slug":"swagger","permalink":"https://caochikai.github.io/tags/swagger/"}]},{"title":"postman测试脚本自定义token","date":"2020-05-26T14:42:00.000Z","path":"2020/05/26/postman测试脚本自定义token/","text":"postman测试脚本自定义token一、需求 postman在测试微服务需要携带oauth2校验令牌Authorization:Bearer ,通过environment variable环境和test Script脚本,针对单次请求或者整个Collections设置自定义请求,甚至可以编排测试流程做自动化测试。environment一般来说分开发、测试和生产环境,从variable角度分environment variable(环境变量)、gloab variable(全局变量)和Collections variable(集合变量),对应了多项目多环境的测试需求;当然postman不足地方不能做自动化UI测试,遇到bug自动截屏或者内部堆栈记录报告;此外,如何从流行的swagger导入postman,OpenAPI与swagger哪个更适合新需求,个人推荐OpenAPI,它与postman更为兼容。 二、设置token环境变量设置postman的环境变量,添加变量token和host upload successful 在登录请求写test Script脚本获取access_token设置环境变量token,参考官方API文档 12var data = JSON.parse(responseBody);pm.environment.set(\"token\",data.access_token); 单次请求中一次性使用在Headers添加Authorization:Bearer ;如果想复用token,在下图右下角红框点击Prese进入Manage Prese,header添加到Prese管理,在其他请求里选择是否使用该header(手动添加)。 upload successful Pre-request Script全局Collections下所有请求使用该自定义请求头,右键Collections选择Edit进入编辑。 upload successful filename already exists, renamed 1pm.request.headers.upsert({ key: \"Authorization\", value: \"Bearer \" + pm.environment.get(\"token\"), 
disabled: false}); 三、通过console是否已经携带token,查看请求日志 四、swagger导入postman诀窍 无论通过http://localhost:9999/v2/api-docs下载成json再导入,还是通过链接导入都不能;因为postman目前只支持OpenAPI。 filename already exists, renamed 正确做法是通过swagger在线编辑器导入swagger文档,点击File选择import URL输入本地swagger url,导入成功点击Edit看见Cover to OpenAPI3,转化成openapi3后。 upload successful upload successful 复制openapi脚本通过postman左上角import选择Raw Text导入,如下图所示 upload successful 五、参考资料如下: Test examples Postman之Pre-request Script 使用详解","tags":[{"name":"tool","slug":"tool","permalink":"https://caochikai.github.io/tags/tool/"}]},{"title":"Jmeter压力测试openresty多级缓存","date":"2020-05-25T12:32:00.000Z","path":"2020/05/25/Jmeter压力测试openresty多级缓存/","text":"Jmeter压力测试openresty多级缓存一、快速使用 从Jmeter官网下载Apache JMeter 5.3 (Requires Java 8+); 解压apache-jmeter-5.3.zip打开apache-jmeter-5.3\\bin\\jmeter.bat,前提是JDK环境变量配好和版本满足要求; 进入GUI界面后默认是英语,点击菜单栏【Options】按钮,依次单击【Choose language】>【Chinese(simplified)】; 如果不喜欢黑暗主题,菜单栏【Options】下的选择第一个外观 二、新建压测实例 参照上一篇文章OpenResty+lua+redis实现多级缓存,我对多级缓存进行压测,先进行缓存预热,再对其进行压测;本地压测虽然不太标准,线程组200循环10次,普通tomcat的sql查询吞吐量TPS100/sec左右,加了redis缓存250/sec左右,OpenResty可以查看报告大概稳定9800/sec(线程2000,循环10次);为了提升压测结果,可以选择增加数据库连接池和tomcat的最大连接数和初始连接数,以及缓冲区大小优化;先进方式就利用大量测试进行机器学习,找到相对优秀的配置组合。 添加本次测试计划 (Test Plan右键–>添加–>Threads(Users)–>线程组) 设置线程数(并发用户数)和循环次数 upload successful 添加Http请求协议及相关配置信息(Thread Group右键–>添加–>取样器–>Http请求) upload successful 为线程添加监听器——察看结果树、聚合报告和图形结果 upload successful 启动测试计划,查看测试报告 upload successful 三、参考资料 jmeter如何设置语言为中文 分析JMeter聚合报告中的各项指标","tags":[{"name":"jmeter","slug":"jmeter","permalink":"https://caochikai.github.io/tags/jmeter/"}]},{"title":"OpenResty+lua+redis实现多级缓存","date":"2020-05-23T10:59:00.000Z","path":"2020/05/23/OpenResty-lua-redis实现广告缓存/","text":"OpenResty+lua+redis实现多级缓存一、需求 ngx_openresty是一个基于 NGINX的lua可编程模块,在性能方面有着出色的性能,配合redis做二级缓存效果,nginx开启一级本地缓存。 实验数据库sql12345678910111213141516171819202122SET NAMES utf8mb4;SET FOREIGN_KEY_CHECKS = 0;-- ------------------------------ Table structure for tb_ad-- ----------------------------DROP TABLE IF 
EXISTS `tb_ad`;CREATE TABLE `tb_ad` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '广告主键', `url` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT 'URL', `status` char(1) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT '0:无效 1:有效', `position` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT '广告位置', `image` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT '图片路径', `start_time` datetime(0) NULL DEFAULT NULL COMMENT '开始时间', `end_time` datetime(0) NULL DEFAULT NULL COMMENT '到期时间', PRIMARY KEY (`id`) USING BTREE) ENGINE = InnoDB AUTO_INCREMENT = 2 CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;-- ------------------------------ Records of tb_ad-- ----------------------------INSERT INTO `tb_ad` VALUES (1, 'https://www.baidu.com/', '1', 'web_index_lb', 'https://kins.oss-cn-shenzhen.aliyuncs.com/yhzb/2020-03-11/ca21b3b17d6f4757b991dd86b8cef3fa-VIP-680.jpeg', '2020-05-22 10:58:08', '2021-06-01 10:58:14'); 二、一二级缓存实现 查询mysql中上架的所有广告转换为json字符串,放入redis中作为二级缓存,等真正的第一次查询会把缓存加入一级本地缓存。当广告修改时候需要请求,需要请求预热接口。 nginx.conf文件配置123456789101112131415161718192021222324252627282930313233343536373839404142434445464748#user nobody;user root root;worker_processes 1;#error_log logs/error.log;#error_log logs/error.log notice;#error_log logs/error.log info;#pid logs/nginx.pid;events { worker_connections 1024;}http { include mime.types; default_type application/octet-stream; sendfile on; #tcp_nopush on; #keepalive_timeout 0; keepalive_timeout 65; #gzip on; #包含redis初始化模块 lua_shared_dict dis_cache 5m; #共享内存开启 server { listen 80; server_name localhost; charset utf-8; #access_log logs/host.access.log main; # 添加预热和读取接口 location /ad_loading{ content_by_lua_file /root/lua/ad_loading.lua; } location /ad_read { content_by_lua_file /root/lua/ad_read.lua; } # redirect server error pages to the static page /50x.html # error_page 500 502 503 504 /50x.html; location = /50x.html 
{ root html; } }} ad_loading.lua预热脚本123456789101112131415161718192021222324252627282930313233343536373839ngx.header.content_type=\"application/json;charset=utf8\"local cjson = require(\"cjson\")local mysql = require(\"resty.mysql\")local uri_args = ngx.req.get_uri_args()local position = uri_args[\"position\"]local db = mysql:new()db:set_timeout(1000) local props = { host = \"127.0.0.1\", port = 3306, database = \"business\", user = \"root\", password = \"root\" }local res = db:connect(props) local select_sql = \"select url,image from tb_ad where status ='1' and position='\"..position..\"' and start_time<= NOW() AND end_time>= NOW()\" res = db:query(select_sql) db:close() local redis = require(\"resty.redis\")local red = redis:new()red:set_timeout(2000)local ip =\"127.0.0.1\"local port = 6379red:connect(ip,port)--Redis Authenticationlocal result, err = red:auth(\"redis\")if not result then ngx.say(\"failed to authenticate: \", err) returnendred:set(\"ad_\"..position,cjson.encode(res))red:close()ngx.say(\"{flag:true}\") ad_read.lua读取脚本1234567891011121314151617181920212223242526272829303132--设置响应头类型ngx.header.content_type=\"application/json;charset=utf8\"--获取请求中的参数IDlocal uri_args = ngx.req.get_uri_args();local position = uri_args[\"position\"];--获取本地缓存local cache_ngx = ngx.shared.dis_cache;--根据ID 获取本地缓存数据local adCache = cache_ngx:get('ad_cache_'..position);if adCache == \"\" or adCache == nil then local redis = require(\"resty.redis\"); local red = redis:new() red:set_timeout(2000) local ok, err = red:connect(\"127.0.0.1\", 6379) --Redis Authentication local result, err = red:auth(\"redis\") if not result then ngx.say(\"failed to authenticate: \", err) return end local rescontent=red:get(\"ad_\"..position) ngx.say(rescontent) red:close() --将redis中获取到广告数据存入nginx本地缓存 cache_ngx:set('ad_cache_'..position, rescontent, 10*60);else --nginx本地缓存中获取到数据直接输出 ngx.say(adCache)end 三、拓展方向 lua + 
nginx可以作为网关进行限流、流控和多级缓存使用,内存小可用性也非常高,配合一些lua模块可以做一些深入拓展。","tags":[{"name":"nginx","slug":"nginx","permalink":"https://caochikai.github.io/tags/nginx/"},{"name":"redis","slug":"redis","permalink":"https://caochikai.github.io/tags/redis/"}]},{"title":"防止jenkins杀掉jar后台运行","date":"2020-05-22T12:09:00.000Z","path":"2020/05/22/防止jenkins杀掉jar后台运行/","text":"一、背景 记录两年前jenkins持续集成遇到需要任务后台执行(nohup执行)结果发现jenkins的job执行完后,看不到运行的进程。shell启动脚本如下: 1nohup java -jar /bootdo.jar > bootdolog.file 2>&1 & 二、解决方法 Jenkins任务结束时候默认使用processTreeKiller自动关掉了所有的子进程,通过下列两种方式都可以让其在后台运行。全局关闭,具体参考:https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller。 全局启动参数1java -Dhudson.util.ProcessTree.disable=true -jar jenkins.war Tomcat启动修改catalina.sh1JAVA_OPTS=\"$JAVA_OPTS -Dhudson.util.ProcessTree.disable=true\"; 当前job便捷方式12#BUILD_ID可以自定义环境变量名,下面只是举例BUILD_ID=dontKillMe /usr/apache/bin/httpd","tags":[{"name":"jenkins","slug":"jenkins","permalink":"https://caochikai.github.io/tags/jenkins/"}]},{"title":"Jenkins常见技巧和插件加速","date":"2020-05-21T06:53:00.000Z","path":"2020/05/21/Jenkins常见技巧和插件加速/","text":"Jenkins常见技巧和插件加速展望计划 在微服务单体仓库、GitLab和k8s环境下,如何配合测试、运维完成Devops。参数化自动和手动构建同时按需增量,同时多次更新合并到最后一次,成功失败发送邮件和信息到运维。包括静态代码检查、自动化测试和容器热更新,同时探讨JenkinsX和Gitlab CI提高构建效率。 一、如何快速启动、停止、重启、查看日志 采用通用的URL方式,就可以实现Jenins的停止,重启和重载。启动java -jar jenkins.war方式,或者通过tomcat也可。 12345http://[jenkins-server-address][:port]/[command] where [command] can beexit to shutdown jenkinsrestart to restart jenkinsreload to reload the configuration 查看日志根据Jenkins官方文档,tail -f 日志文件位置,遇到过由于docket插件造成日志文件爆磁盘。 123456LinuxBy default logs should be made available in /var/log/jenkins/jenkins.log, unless customized in /etc/default/jenkins (for *.deb) or via /etc/sysconfig/jenkins (for */rpm)WindowsBy default logs should be at %JENKINS_HOME%/jenkins.out and %JENKINS_HOME%/jenkins.err, unless customized in %JENKINS_HOME%/jenkins.xmlDockerIf you run Jenkins inside docker as a detached container, you can use docker logs containerId to view the Jenkins logs. 
在Jenkins内部日志是可以设置logs级别和信息,默认使用java.util.logging输出INFO级别日志。插件日志可以添加log recorder,就可以查看到源码级别日志输出,如图所示: https://www.jenkins.io/doc/book/resources/managing/logging-manage-screen.png https://img2018.cnblogs.com/blog/692500/201908/692500-20190811165844377-1762367867.png 二、国内镜像加速插件和版本更新 官方镜像很慢几乎不可用,国内有两家镜像源:清华大学和华为,个人选择华为速度起飞︿( ̄︶ ̄)︿。需要改动两个文件hudson.model.UpdateCenter.xml和/updates/default.json,更新完成通过http://localhost:8080/restart重启,下载插件速度飙升跑满宽带。 镜像列表 清华大学镜像:https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json 华为镜像:https://mirrors.huaweicloud.com/jenkins/updates/update-center.json 123456789vim hudson.model.UpdateCenter.xml<?xml version='1.1' encoding='UTF-8'?><sites> <site> <id>default</id> <url>https://mirrors.huaweicloud.com/jenkins/updates/update-center.json</url> </site></sites> 123vim updates/default.json:1,$s/http:\\/\\/updates.jenkins-ci.org\\/download/https:\\/\\/mirrors.huaweicloud.com\\/jenkins/g:1,$s/http:\\/\\/www.google.com/https:\\/\\/www.baidu.com/g 三、参考资料 Jenkins技巧:如何启动、停止、重启、重载Jenkins Viewing logs Table of Contents Logs on the system Jenkins增加日志查看内容. 如何查看Jenkins插件的日志? 
jenkins | 使用镜像加速安装插件","tags":[{"name":"jenkins","slug":"jenkins","permalink":"https://caochikai.github.io/tags/jenkins/"}]},{"title":"Spring Cloud Feign连接池okhttp EOF异常","date":"2020-05-18T07:07:00.000Z","path":"2020/05/18/Spring-Cloud-Feign连接池okhttp-EOF异常/","text":"Spring Cloud Feign连接池okhttp EOF异常一、异常说明 所有中台服务有一定概率经常返回远程调用返回null情况,而且在我进入公司之前已经出现过很长一段时间都未曾解决。我第一怀疑的是远程调用时间开销过长,通过埋点FeignFallback记录系统日志记录,一段时间过后sys_log大量类似下面记录,证明我的猜想是错误的;通过Linux tail查看jar日志,追踪源码发现是Okhttp java.io.EOFException: \\n not found: size=0 content= unexpected end,根据okhttp github issue可以得出结论,由于连接池ConnectionPool 默认keep-alive超时时间是5min,超过时间server端会主动关闭这个连接,导致输入流中断,进而抛出EOF 异常;另一种情况就是网络不稳定抖动时候也会出现。 埋点系统日志 id type title service_id create_by create_time update_time remote_addr user_agent request_uri method params time del_flag exception 5179 0 feign接口获取会员信息失败 pig 1868054xxxx 2020-05-15 13:40:17.0 2020-05-15 13:40:17.0 192.168.6.148 Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36 /user/info GET 5 0 EOF 异常控制台日志:123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237feign.RetryableException: unexpected end of stream on Connection{192.168.6.55:4000, proxy=DIRECT hostAddress=/192.168.6.55:4000 cipherSuite=none protocol=http/1.1} executing GET http://gold-admin/syssite/getSiteById/1 at feign.FeignException.errorExecuting(FeignException.java:132) at 
feign.SynchronousMethodHandler.executeAndDecode(SynchronousMethodHandler.java:113) at feign.SynchronousMethodHandler.invoke(SynchronousMethodHandler.java:78) at feign.hystrix.HystrixInvocationHandler$1.run(HystrixInvocationHandler.java:109) at com.netflix.hystrix.HystrixCommand$2.call(HystrixCommand.java:302) at com.netflix.hystrix.HystrixCommand$2.call(HystrixCommand.java:298) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:46) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:35) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:51) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:35) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at 
rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:51) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:35) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeMap.call(OnSubscribeMap.java:48) at rx.internal.operators.OnSubscribeMap.call(OnSubscribeMap.java:33) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:41) at rx.internal.operators.OnSubscribeDoOnEach.call(OnSubscribeDoOnEach.java:30) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:51) at rx.internal.operators.OnSubscribeDefer.call(OnSubscribeDefer.java:35) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at 
rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.subscribe(Observable.java:10423) at rx.Observable.subscribe(Observable.java:10390) at rx.internal.operators.BlockingOperatorToFuture.toFuture(BlockingOperatorToFuture.java:51) at rx.observables.BlockingObservable.toFuture(BlockingObservable.java:410) at com.netflix.hystrix.HystrixCommand.queue(HystrixCommand.java:378) at com.netflix.hystrix.HystrixCommand.execute(HystrixCommand.java:344) at feign.hystrix.HystrixInvocationHandler.invoke(HystrixInvocationHandler.java:170) at com.sun.proxy.$Proxy289.getSiteById(Unknown Source) at com.gdjs.gold.business.controller.BusinessPortalController.info(BusinessPortalController.java:58) at com.gdjs.gold.business.controller.BusinessPortalController$$FastClassBySpringCGLIB$$66c8c0df.invoke(<generated>) at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:749) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93) at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:688) at com.gdjs.gold.business.controller.BusinessPortalController$$EnhancerBySpringCGLIB$$261d70.info(<generated>) at sun.reflect.GeneratedMethodAccessor1225.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190) at 
org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138) at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:104) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892) at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797) at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87) at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1039) at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:942) at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1005) at org.springframework.web.servlet.FrameworkServlet.doGet(FrameworkServlet.java:897) at javax.servlet.http.HttpServlet.service(HttpServlet.java:645) at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:882) at javax.servlet.http.HttpServlet.service(HttpServlet.java:750) at io.undertow.servlet.handlers.ServletHandler.handleRequest(ServletHandler.java:74) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:129) at org.springframework.boot.actuate.web.trace.servlet.HttpTraceFilter.doFilterInternal(HttpTraceFilter.java:88) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:320) at 
org.springframework.security.web.access.intercept.FilterSecurityInterceptor.invoke(FilterSecurityInterceptor.java:127) at org.springframework.security.web.access.intercept.FilterSecurityInterceptor.doFilter(FilterSecurityInterceptor.java:91) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.access.ExceptionTranslationFilter.doFilter(ExceptionTranslationFilter.java:119) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.session.SessionManagementFilter.doFilter(SessionManagementFilter.java:137) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.authentication.AnonymousAuthenticationFilter.doFilter(AnonymousAuthenticationFilter.java:111) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter.doFilter(SecurityContextHolderAwareRequestFilter.java:170) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.savedrequest.RequestCacheAwareFilter.doFilter(RequestCacheAwareFilter.java:63) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.oauth2.provider.authentication.OAuth2AuthenticationProcessingFilter.doFilter(OAuth2AuthenticationProcessingFilter.java:176) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.authentication.logout.LogoutFilter.doFilter(LogoutFilter.java:116) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at 
org.springframework.security.web.header.HeaderWriterFilter.doFilterInternal(HeaderWriterFilter.java:74) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.context.SecurityContextPersistenceFilter.doFilter(SecurityContextPersistenceFilter.java:105) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter.doFilterInternal(WebAsyncManagerIntegrationFilter.java:56) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at org.springframework.security.web.FilterChainProxy$VirtualFilterChain.doFilter(FilterChainProxy.java:334) at org.springframework.security.web.FilterChainProxy.doFilterInternal(FilterChainProxy.java:215) at org.springframework.security.web.FilterChainProxy.doFilter(FilterChainProxy.java:178) at org.springframework.web.filter.DelegatingFilterProxy.invokeDelegate(DelegatingFilterProxy.java:357) at org.springframework.web.filter.DelegatingFilterProxy.doFilter(DelegatingFilterProxy.java:270) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:99) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:92) at 
org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:93) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.cloud.sleuth.instrument.web.ExceptionLoggingFilter.doFilter(ExceptionLoggingFilter.java:50) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at brave.servlet.TracingFilter.doFilter(TracingFilter.java:99) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.boot.actuate.metrics.web.servlet.WebMvcMetricsFilter.filterAndRecordMetrics(WebMvcMetricsFilter.java:114) at org.springframework.boot.actuate.metrics.web.servlet.WebMvcMetricsFilter.doFilterInternal(WebMvcMetricsFilter.java:104) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:200) at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:109) at io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61) at 
io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131) at io.undertow.servlet.handlers.FilterHandler.handleRequest(FilterHandler.java:84) at io.undertow.servlet.handlers.security.ServletSecurityRoleHandler.handleRequest(ServletSecurityRoleHandler.java:62) at io.undertow.servlet.handlers.ServletChain$1.handleRequest(ServletChain.java:68) at io.undertow.servlet.handlers.ServletDispatchingHandler.handleRequest(ServletDispatchingHandler.java:36) at io.undertow.servlet.handlers.security.SSLInformationAssociationHandler.handleRequest(SSLInformationAssociationHandler.java:132) at io.undertow.servlet.handlers.security.ServletAuthenticationCallHandler.handleRequest(ServletAuthenticationCallHandler.java:57) at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43) at io.undertow.security.handlers.AbstractConfidentialityHandler.handleRequest(AbstractConfidentialityHandler.java:46) at io.undertow.servlet.handlers.security.ServletConfidentialityConstraintHandler.handleRequest(ServletConfidentialityConstraintHandler.java:64) at io.undertow.security.handlers.AuthenticationMechanismsHandler.handleRequest(AuthenticationMechanismsHandler.java:60) at io.undertow.servlet.handlers.security.CachedAuthenticatedSessionHandler.handleRequest(CachedAuthenticatedSessionHandler.java:77) at io.undertow.security.handlers.AbstractSecurityContextAssociationHandler.handleRequest(AbstractSecurityContextAssociationHandler.java:43) at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43) at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43) at io.undertow.servlet.handlers.ServletInitialHandler.handleFirstRequest(ServletInitialHandler.java:292) at io.undertow.servlet.handlers.ServletInitialHandler.access$100(ServletInitialHandler.java:81) at io.undertow.servlet.handlers.ServletInitialHandler$2.call(ServletInitialHandler.java:138) at 
io.undertow.servlet.handlers.ServletInitialHandler$2.call(ServletInitialHandler.java:135) at io.undertow.servlet.core.ServletRequestContextThreadSetupAction$1.call(ServletRequestContextThreadSetupAction.java:48) at io.undertow.servlet.core.ContextClassLoaderSetupAction$1.call(ContextClassLoaderSetupAction.java:43) at io.undertow.servlet.handlers.ServletInitialHandler.dispatchRequest(ServletInitialHandler.java:272) at io.undertow.servlet.handlers.ServletInitialHandler.access$000(ServletInitialHandler.java:81) at io.undertow.servlet.handlers.ServletInitialHandler$1.handleRequest(ServletInitialHandler.java:104) at io.undertow.server.Connectors.executeRootHandler(Connectors.java:364) at io.undertow.server.HttpServerExchange$1.run(HttpServerExchange.java:830) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748)Caused by: java.io.IOException: unexpected end of stream on Connection{192.168.6.55:4000, proxy=DIRECT hostAddress=/192.168.6.55:4000 cipherSuite=none protocol=http/1.1} at okhttp3.internal.http1.Http1Codec.readResponseHeaders(Http1Codec.java:208) at okhttp3.internal.http.CallServerInterceptor.intercept(CallServerInterceptor.java:88) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147) at okhttp3.internal.connection.ConnectInterceptor.intercept(ConnectInterceptor.java:45) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121) at okhttp3.internal.cache.CacheInterceptor.intercept(CacheInterceptor.java:93) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121) at okhttp3.internal.http.BridgeInterceptor.intercept(BridgeInterceptor.java:93) at 
okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147) at okhttp3.internal.http.RetryAndFollowUpInterceptor.intercept(RetryAndFollowUpInterceptor.java:126) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147) at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121) at okhttp3.RealCall.getResponseWithInterceptorChain(RealCall.java:200) at okhttp3.RealCall.execute(RealCall.java:77) at feign.okhttp.OkHttpClient.execute(OkHttpClient.java:167) at org.springframework.cloud.sleuth.instrument.web.client.feign.TracingFeignClient.execute(TracingFeignClient.java:100) at org.springframework.cloud.sleuth.instrument.web.client.feign.LazyTracingFeignClient.execute(LazyTracingFeignClient.java:60) at org.springframework.cloud.openfeign.ribbon.FeignLoadBalancer.execute(FeignLoadBalancer.java:93) at org.springframework.cloud.openfeign.ribbon.FeignLoadBalancer.execute(FeignLoadBalancer.java:56) at com.netflix.client.AbstractLoadBalancerAwareClient$1.call(AbstractLoadBalancerAwareClient.java:104) at com.netflix.loadbalancer.reactive.LoadBalancerCommand$3$1.call(LoadBalancerCommand.java:303) at com.netflix.loadbalancer.reactive.LoadBalancerCommand$3$1.call(LoadBalancerCommand.java:287) at rx.internal.util.ScalarSynchronousObservable$3.call(ScalarSynchronousObservable.java:231) at rx.internal.util.ScalarSynchronousObservable$3.call(ScalarSynchronousObservable.java:228) at rx.Observable.unsafeSubscribe(Observable.java:10327) at rx.internal.operators.OnSubscribeConcatMap$ConcatMapSubscriber.drain(OnSubscribeConcatMap.java:286) at rx.internal.operators.OnSubscribeConcatMap$ConcatMapSubscriber.onNext(OnSubscribeConcatMap.java:144) at com.netflix.loadbalancer.reactive.LoadBalancerCommand$1.call(LoadBalancerCommand.java:185) at com.netflix.loadbalancer.reactive.LoadBalancerCommand$1.call(LoadBalancerCommand.java:180) at rx.Observable.unsafeSubscribe(Observable.java:10327) at 
rx.internal.operators.OnSubscribeConcatMap.call(OnSubscribeConcatMap.java:94) at rx.internal.operators.OnSubscribeConcatMap.call(OnSubscribeConcatMap.java:42) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:48) at rx.internal.operators.OnSubscribeLift.call(OnSubscribeLift.java:30) at rx.Observable.subscribe(Observable.java:10423) at rx.Observable.subscribe(Observable.java:10390) at rx.observables.BlockingObservable.blockForSingle(BlockingObservable.java:443) at rx.observables.BlockingObservable.single(BlockingObservable.java:340) at com.netflix.client.AbstractLoadBalancerAwareClient.executeWithLoadBalancer(AbstractLoadBalancerAwareClient.java:112) at org.springframework.cloud.openfeign.ribbon.LoadBalancerFeignClient.execute(LoadBalancerFeignClient.java:83) at org.springframework.cloud.sleuth.instrument.web.client.feign.TraceLoadBalancerFeignClient.execute(TraceLoadBalancerFeignClient.java:71) at feign.SynchronousMethodHandler.executeAndDecode(SynchronousMethodHandler.java:108) ... 181 common frames omittedCaused by: java.io.EOFException: \\n not found: limit=0 content=… at okio.RealBufferedSource.readUtf8LineStrict(RealBufferedSource.java:237) at okhttp3.internal.http1.Http1Codec.readHeaderLine(Http1Codec.java:215) at okhttp3.internal.http1.Http1Codec.readResponseHeaders(Http1Codec.java:189) ... 226 common frames omitted 二、如何埋点这种概率性异常呢? 
Feign的Fallback机制可以进行日志埋点,由于长时间接触这个异常,发现规律都是因为登录后长时间不进行请求。下面就是埋点日志代码实现: 123456789101112131415161718192021222324/** * 系统日志工具类 * * @author caochikai */@UtilityClasspublic class SysLogUtils { public SysLog getSysLog() { HttpServletRequest request = ((ServletRequestAttributes) Objects .requireNonNull(RequestContextHolder.getRequestAttributes())).getRequest(); SysLog sysLog = new SysLog();//需要记录请求详细信息,根据需要改变细节 sysLog.setCreateBy(Objects.requireNonNull(getUsername())); sysLog.setType(CommonConstants.STATUS_NORMAL); sysLog.setRemoteAddr(ServletUtil.getClientIP(request)); sysLog.setRequestUri(URLUtil.getPath(request.getRequestURI())); sysLog.setMethod(request.getMethod()); sysLog.setUserAgent(request.getHeader(\"user-agent\")); sysLog.setParams(HttpUtil.toParams(request.getParameterMap())); sysLog.setServiceId(getClientId()); return sysLog; }....省略} 12345678910111213141516171819@Slf4j@Componentpublic class MallMemberFeignFallbackImpl implements MallMemberFeign { @Setter private Throwable cause;//Long startTime参数只是为了暂时记录远程调用的开始时间,会去掉 @Override public R<MallMember> getMemberByUserId(String userId, Long startTime) { log.error(\"通过userId获取会员详情失败{}\",userId, cause); SysLog logVo = SysLogUtils.getSysLog(); logVo.setTitle(\"feign接口获取会员信息失败\"); // 发送异步日志事件 Long endTime = System.currentTimeMillis(); logVo.setTime(endTime - startTime);//发送SysLogEvent事件 SpringContextHolder.publishEvent(new SysLogEvent(logVo)); return null; }} 1234567891011121314151617/** * @author caochikai * 异步监听日志事件 */@Slf4j@AllArgsConstructorpublic class SysLogListener { private final RemoteLogService remoteLogService; @Async @Order @EventListener(SysLogEvent.class) public void saveSysLog(SysLogEvent event) { SysLog sysLog = (SysLog) event.getSource(); remoteLogService.saveLog(sysLog); }} 三、Okhttp源码分析 在进行常规的breakpoint之后, 需要重点关注OkHttpLoadBalancingClient、RetryAndFollowUpInterceptor、Http1Codec和RealBufferedSource。 
12345678910111213141516171819202122232425262728293031323334353637383940spring-cloud-netflix-ribbon-2.0.0.RELEASE-sources.jar!/org/springframework/cloud/netflix/ribbon/okhttp/OkHttpLoadBalancingClient.javapublic class OkHttpLoadBalancingClient extends AbstractLoadBalancingClient<OkHttpRibbonRequest, OkHttpRibbonResponse, OkHttpClient> { //...... @Override public OkHttpRibbonResponse execute(OkHttpRibbonRequest ribbonRequest, final IClientConfig configOverride) throws Exception { boolean secure = isSecure(configOverride); if (secure) { final URI secureUri = UriComponentsBuilder.fromUri(ribbonRequest.getUri()) .scheme(\"https\").build().toUri(); ribbonRequest = ribbonRequest.withNewUri(secureUri); }//Feign实际调用client OkHttpClient httpClient = getOkHttpClient(configOverride, secure); final Request request = ribbonRequest.toRequest(); Response response = httpClient.newCall(request).execute(); return new OkHttpRibbonResponse(response, ribbonRequest.getUri()); } OkHttpClient getOkHttpClient(IClientConfig configOverride, boolean secure) { IClientConfig config = configOverride != null ? configOverride : this.config; RibbonProperties ribbon = RibbonProperties.from(config);//建造者模式构建http请求 OkHttpClient.Builder builder = this.delegate.newBuilder() .connectTimeout(ribbon.connectTimeout(this.connectTimeout), TimeUnit.MILLISECONDS) .readTimeout(ribbon.readTimeout(this.readTimeout), TimeUnit.MILLISECONDS) .followRedirects(ribbon.isFollowRedirects(this.followRedirects)); if (secure) { builder.followSslRedirects(ribbon.isFollowRedirects(this.followRedirects)); } return builder.build(); } //......} 1234567891011121314151617181920212223242526272829303132333435public class RetryAndFollowUpInterceptor implements Interceptor { //.... @Override public Response intercept(Chain chain) throws IOException { //.... 
while(true) { try { response = realChain.proceed(request, streamAllocation, null, null); releaseConnection = false; } catch (IOException) { // An attempt to communicate with a server failed. The request may have been sent. boolean requestSendStarted = !(e instanceof ConnectionShutdownException); // 此处非常重要,若不允许恢复,直接将异常向上抛出(调用方接收),反之 循环继续执行realChain.proceed()方法 if (!recover(e, streamAllocation, requestSendStarted, request)) throw e; releaseConnection = false; continue; } } private boolean recover(IOException e, StreamAllocation streamAllocation, boolean requestSendStarted, Request userRequest) { // .... // The application layer has forbidden retries. // 若客户端配置 retryOnConnectionFailure 为false 则表明不允许重试,直接将异常抛给调用方 if (!client.retryOnConnectionFailure()) return false; } //....} 当执行realChain.proceed() ,将事件继续分发给各个拦截器,最终执行到 Http1Codec#readResponseHeaders 方法。Http1Codec 用于Encode Http Request 以及 解析 Http Response的,用于获取头信息相关参数。 1234567891011121314151617181920212223Http1Codec.javapublic class Http1Codec implements HttpCodec { //... @Override public Response.Builder readResponseHeaders(boolean expectContinue) throws IOException { try{ StatusLine statusLine = StatusLine.parse(readHeaderLine()); //后续代码根据statusLine 构造Response } catch(EOFException e) { // 原来异常信息就是从这里抛出来的,接下来需要重点关注下readHeaderLine方法 // Provide more context if the server ends the stream before sending a response. IOException exception = new IOException(\"unexpected end of stream on \" + streamAllocation); exception.initCause(e); } } private String readHeaderLine() throws IOException { // 继续查看RealBufferedSource#readUtf8LineStrict方法 String line = source.readUtf8LineStrict(headerLimit); headerLimit -= line.length(); return line; } //...} 12345678910111213141516171819202122RealBufferedSource.javafinal class RealBufferedSource implements BufferedSource { //... 
// 由于server端已经closed,故buffer == null 将EOF异常向上抛出 @Override public String readUtf8LineStrict(long limit) throws IOException { if (limit < 0) throw new IllegalArgumentException(\"limit < 0: \" + limit); long scanLength = limit == Long.MAX_VALUE ? Long.MAX_VALUE : limit + 1; long newline = indexOf((byte) '\\n', 0, scanLength); if (newline != -1) return buffer.readUtf8Line(newline); if (scanLength < Long.MAX_VALUE && request(scanLength) && buffer.getByte(scanLength - 1) == '\\r' && request(scanLength + 1) && buffer.getByte(scanLength) == '\\n') { return buffer.readUtf8Line(scanLength); // The line was 'limit' UTF-8 bytes followed by \\r\\n. } Buffer data = new Buffer(); buffer.copyTo(data, 0, Math.min(32, buffer.size())); //实际抛出异常的栈点 throw new EOFException(\"\\\\n not found: limit=\" + Math.min(buffer.size(), limit) + \" content=\" + data.readByteString().hex() + '…'); } //...} 四、解决办法12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576package com.common.core.config;import okhttp3.ConnectionPool;import okhttp3.OkHttpClient;import org.springframework.context.annotation.Bean;import org.springframework.context.annotation.Configuration;import javax.net.ssl.SSLContext;import javax.net.ssl.SSLSocketFactory;import javax.net.ssl.TrustManager;import javax.net.ssl.X509TrustManager;import java.security.KeyManagementException;import java.security.NoSuchAlgorithmException;import java.security.SecureRandom;import java.security.cert.CertificateException;import java.security.cert.X509Certificate;import java.util.concurrent.TimeUnit;/** * 配置 okhttp 与连接池 * Date 2019/12/11 */@Configurationpublic class OkHttpConfiguration { @Bean public OkHttpClient okHttpClient() { return new OkHttpClient.Builder()//解决重点在这里,发送异常允许重试请求 **.retryOnConnectionFailure(true)** .connectionPool(pool())//设置连接超时 .connectTimeout(3000, TimeUnit.SECONDS)//设置读超时 .readTimeout(3000, TimeUnit.SECONDS)//设置写超时 
.writeTimeout(3000,TimeUnit.SECONDS) .build(); } @Bean public X509TrustManager x509TrustManager() { return new X509TrustManager() { @Override public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { } @Override public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { } @Override public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[0]; } }; } @Bean public SSLSocketFactory sslSocketFactory() { try { //信任任何链接 SSLContext sslContext = SSLContext.getInstance(\"TLS\"); sslContext.init(null, new TrustManager[]{x509TrustManager()}, new SecureRandom()); return sslContext.getSocketFactory(); } catch (NoSuchAlgorithmException e) { e.printStackTrace(); } catch (KeyManagementException e) { e.printStackTrace(); } return null; } @Bean public ConnectionPool pool() {//ConnectionPool 每个地址的最大空闲连接数200,保持5分钟长连接 return new ConnectionPool(200, 5, TimeUnit.MINUTES); }} 五、参考资料 Spring Cloud Feign 总结问题,注意点,性能调优,切换okhttp3 Spring cloud 超时及重试配置【ribbon及其它http client】 [记录某次解决`Okhttp java.io.EOFException: \\n not found: size=0 content= unexpected end](https://juejin.im/post/5b5efedef265da0f6825e235)","tags":[{"name":"SpringCloud","slug":"SpringCloud","permalink":"https://caochikai.github.io/tags/SpringCloud/"}]},{"title":"SpringCloud之Wesocket双向通信","date":"2020-05-12T06:47:00.000Z","path":"2020/05/12/SpringBoot之Wesocket双向通信/","text":"SpringCloud之Wesocket双向通信一、需求 运营中台向某个商家或者全部平台发送实时消息通知,websocket在微服务gateway网关和Oauth2鉴权的环境下,会遇到网关转发Authorization:Bearer 鉴权失败。由于websocket协议在握手时候是不支持Oauth2在header放置token,那么就有两种解决办法,第一种在网关处添加GlobalFilter转发header并且暴露websocket,第二种直接暴露websocket放开鉴权,登录之后直接链接websocket;有了双向通信功能就可以取代轮询,比如PC扫码支付后通知前端支付结果,具体实现参考Github Demo仓库在文章结尾。 二、第一种解决办法 Websocket握手协商报文如下,SockJS注意端点跨域: 12345HTTP/1.1 101 Switching ProtocolsUpgrade: websocketConnection: UpgradeSec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=Sec-WebSocket-Protocol: chat 
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051@Componentpublic class WebsocketHandler implements GlobalFilter, Ordered { private final static String DEFAULT_FILTER_PATH = \"/user-service/info\"; /** * @param exchange ServerWebExchange是一个HTTP请求-响应交互的契约。提供对HTTP请求和响应的访问, * 并公开额外的 服务器 端处理相关属性和特性,如请求属性 * @param chain * @return */ @Override public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) { String upgrade = exchange.getRequest().getHeaders().getUpgrade(); URI requestUrl = exchange.getRequiredAttribute(GATEWAY_REQUEST_URL_ATTR); String scheme = requestUrl.getScheme(); if (\"websocket\".equals(upgrade)) { String queryParam = requestUrl.getRawQuery(); Map<String, String> paramMap = HttpUtil.decodeParamMap(queryParam, CharsetUtil.UTF_8); String token = paramMap.get(\"token\"); if (StrUtil.isNotBlank(token)) { //向headers中放token,记得build ServerHttpRequest host = exchange.getRequest().mutate().header(\"Authorization\", \"Bearer \" + token).build(); //将现在的request 变成 change对象 ServerWebExchange build = exchange.mutate().request(host).build(); return chain.filter(build); } } if (!\"ws\".equals(scheme) && !\"wss\".equals(scheme)) { return chain.filter(exchange); } else if (DEFAULT_FILTER_PATH.equals(requestUrl.getPath())) { String wsScheme = convertWsToHttp(scheme); URI wsRequestUrl = UriComponentsBuilder.fromUri(requestUrl).scheme(wsScheme).build().toUri(); exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, wsRequestUrl); } return chain.filter(exchange); } @Override public int getOrder() { return Ordered.LOWEST_PRECEDENCE - 2; } static String convertWsToHttp(String scheme) { scheme = scheme.toLowerCase(); return \"ws\".equals(scheme) ? \"http\" : \"wss\".equals(scheme) ? 
\"https\" : scheme; }} 123456789101112131415161718192021222324252627282930313233public class TokenHandshakeInterceptor implements HandshakeInterceptor { /** * websocket握手认证在registry.addEndpoint(\"/websocket\")添加 */ @Override public boolean beforeHandshake(ServerHttpRequest request, ServerHttpResponse response, WebSocketHandler wsHandler, Map<String, Object> attributes) throws Exception { ServletServerHttpRequest req = (ServletServerHttpRequest) request; //获取token认证 String token = req.getServletRequest().getParameter(\"token\"); //解析token获取用户信息 Principal user = parseToken(token); if(user == null){ //如果token认证失败user为null,返回false拒绝握手 return false; } //保存认证用户 attributes.put(\"user\", user); return true; } @Override public void afterHandshake(ServerHttpRequest request, ServerHttpResponse response, WebSocketHandler wsHandler, Exception exception) { } /** * 根据token认证授权 * @param token */ private Principal parseToken(String token){ //TODO 解析token并获取认证用户信息,建议通过feight接口 return null; }} 三、第二种SockJS直接连接123456789101112131415161718192021222324252627282930313233343536373839404142@Configuration@EnableWebSocketMessageBrokerpublic class WebSocketConfig implements WebSocketMessageBrokerConfigurer {//注入跨域配置默认是* @Autowired private CommonConfig commonConfig; @Override public void configureMessageBroker(MessageBrokerRegistry registry) { /*类似mq的Broker主题,通常按中台设计有几大平台就有几类主题 * This enables a simple (in-memory) message broker for our application. * The `/topic` designates that any destination prefixed with `/topic` * will be routed back to the client. * It's important to keep in mind, this will not work with more than one * application instance, and it does not support all of the features a * full message broker like RabbitMQ, ActiveMQ, etc... provide. 
*/ registry.enableSimpleBroker(Arrays.stream(ThemeEnum.values()).map(ThemeEnum::getTheme).toArray(String[]::new)); /*客户端发送消息开头 * The application destination prefix `/app` designates the broker to send * messages prefixed with `/app` to our `@MessageMapping`s. */ registry.setApplicationDestinationPrefixes(\"/app\"); } @Override public void registerStompEndpoints(StompEndpointRegistry registry) { /* * This configures a STOMP (Simple Text Oriented Messaging Protocol) * endpoint for our websocket to be hosted on }) */ registry.addEndpoint(\"/websocket\").setAllowedOrigins(commonConfig.getOrigins()).withSockJS(); /* * This configures an endpoint with a fallback for SockJS in case the * client (an old browser) doesn't support WebSockets natively */ registry.addEndpoint(\"/sockjs\").setAllowedOrigins(commonConfig.getOrigins()).withSockJS(); }} 四、网页端测试Demo123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208<!DOCTYPE html><html lang=\"en\"><head> <meta charset=\"UTF-8\"> <title>Title</title> <link href=\"https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.css\" rel=\"stylesheet\"/> <style> body { background-color: #f5f5f5; } #main-content { max-width: 940px; padding: 2em 3em; margin: 0 auto 20px; background-color: #fff; border: 1px solid #e5e5e5; -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; } .from { width: 120px; } .timeStamp { width: 220px; } </style> <script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.js\"></script> <script 
src=\"https://cdn.jsdelivr.net/npm/@stomp/stompjs@5.0.0/bundles/stomp.umd.js\"></script> <script src=\"https://cdn.jsdelivr.net/npm/sockjs-client@1/dist/sockjs.min.js\"></script> <script> var stompClient = null; //根据需要是否进行websocket配合oauth2进行鉴权 var token = \"754741bc-08c1-4433-ab05-c803c589b58f\"; function setConnected(connected) { $(\"#connect\").prop(\"disabled\", connected); $(\"#connectSockJS\").prop(\"disabled\", connected); $(\"#disconnect\").prop(\"disabled\", !connected); if (connected) { $(\"#responses\").show(); } else { $(\"#responses\").hide(); } $(\"#messages\").html(\"\"); } function frameHandler(frame) { setConnected(true); console.log('Connected: ' + frame); //SockJS订阅不同的主题,类似mq的消息队列模式 stompClient.subscribe('/topic', function (message) { showMessage(message.body); }); stompClient.subscribe('/business', function (message) { showMessage(message.body); }); stompClient.subscribe('/business/11', function (message) { showMessage(message.body); }); } function onSocketClose() { if (stompClient !== null) { stompClient.deactivate(); } setConnected(false); console.log(\"Socket was closed. 
Setting connected to false!\") } function connect() { stompClient = new window.StompJs.Client({ webSocketFactory: function () { return new WebSocket(\"ws://127.0.0.1:9999/common/websocket\"); } }); stompClient.onConnect = function (frame) { frameHandler(frame) }; stompClient.onWebsocketClose = function () { onSocketClose(); }; stompClient.activate(); } function connectSockJs() { stompClient = new window.StompJs.Client({ webSocketFactory: function () { return new window.SockJS(\"http://127.0.0.1:9999/common/sockjs\"); } }); stompClient.onConnect = function (frame) { frameHandler(frame) }; stompClient.onWebsocketClose = function () { onSocketClose(); }; stompClient.activate(); } function disconnect() { if (stompClient !== null) { stompClient.deactivate(); } setConnected(false); console.log(\"Disconnected\"); } function sendMessage() { stompClient.publish({ destination: \"/app/send\", body: JSON.stringify({ 'from': $(\"#from\").val(), 'message': $(\"#message\").val() }) }); } function showMessage(message) { var msg = JSON.parse(message); $(\"#responses\").prepend(\"<tr>\" + \"<td class='timeStamp'>\" + msg['timeStamp'] + \"</td>\" + \"<td class='from'>\" + msg['from'] + \"</td>\" + \"<td>\" + msg['message'] + \"</td>\" + \"</tr>\"); } $(function () { $(\"form\").on('submit', function (e) { e.preventDefault(); }); $(\"#connect\").click(function () { connect(); }); $(\"#connectSockJS\").click(function () { connectSockJs(); }); $(\"#disconnect\").click(function () { disconnect(); }); $(\"#send\").click(function () { sendMessage(); }); $(\"document\").ready(function () { disconnect(); }); }); </script></head><body><noscript><h2 style=\"color: #ff0000\">Seems your browser doesn't support Javascript! Websocket relies on Javascript being enabled. 
Please enable Javascript and reload this page!</h2></noscript><div class=\"container\" id=\"main-content\"> <div class=\"row\"> <div class=\"col-md-10\"> <form class=\"form-inline\"> <div class=\"form-group\"> <label for=\"connect\">WebSocket connection:</label> <button class=\"btn btn-default\" id=\"connect\" type=\"submit\">Connect</button> <button class=\"btn btn-default\" id=\"connectSockJS\" type=\"submit\">ConnectSockJS</button> </div> </form> </div> <div class=\"col-md-2\"> <form class=\"form-inline\"> <div class=\"form-group\"> <button class=\"btn btn-default\" disabled=\"disabled\" id=\"disconnect\" type=\"submit\"> Disconnect </button> </div> </form> </div> </div> <div class=\"row\"> <div class=\"col-md-12\"> <form class=\"form-inline\"> <div class=\"form-group\"> <label for=\"from\">Username:</label> <input class=\"form-control\" id=\"from\" placeholder=\"Username...\" type=\"text\"> <label for=\"message\">Message:</label> <input class=\"form-control\" id=\"message\" placeholder=\"Your message here...\" type=\"text\"> </div> <button class=\"btn btn-default\" id=\"send\" type=\"submit\">Send</button> </form> </div> </div> <div class=\"row\"> <div class=\"col-md-12\"> <table class=\"table table-striped\" id=\"responses\"> <thead> <tr> <th>Messages</th> </tr> </thead> <tbody id=\"messages\"> </tbody> </table> </div> </div></div></body></html> 五、参考Demo和资料 https://github.com/jmlw/demo-projects https://blog.joshmlwood.com/websockets-with-spring-boot/","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"SpringBoot之Redis定时发送消息","date":"2020-05-09T03:26:00.000Z","path":"2020/05/09/SpringBoot之Redis定时发送消息/","text":"SpringBoot之Redis定时发送消息一、需求 实时发送定时公告,倒计时功能通过监听Redis 缓存过期(Key 失效)事件。类似用途可以用于订单定时关闭,商品或活动上下架。 二、修改 redis.conf 文件,打开 notify-keyspace-events Ex 的注释,开启过期通知功能123456789101112131415161718192021222324252627282930313233343536373839404142434445############################# EVENT NOTIFICATION 
############################### Redis can notify Pub/Sub clients about events happening in the key space.# This feature is documented at http://redis.io/topics/notifications## For instance if keyspace events notification is enabled, and a client# performs a DEL operation on key \"foo\" stored in the Database 0, two# messages will be published via Pub/Sub:## PUBLISH __keyspace@0__:foo del# PUBLISH __keyevent@0__:del foo## It is possible to select the events that Redis will notify among a set# of classes. Every class is identified by a single character:## K Keyspace events, published with __keyspace@<db>__ prefix.# E Keyevent events, published with __keyevent@<db>__ prefix.# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...# $ String commands# l List commands# s Set commands# h Hash commands# z Sorted set commands# x Expired events (events generated every time a key expires)# e Evicted events (events generated when a key is evicted for maxmemory)# A Alias for g$lshzxe, so that the \"AKE\" string means all the events.## The \"notify-keyspace-events\" takes as argument a string that is composed# of zero or multiple characters. The empty string means that notifications# are disabled.## Example: to enable list and generic events, from the point of view of the# event name, use:## notify-keyspace-events Elg## Example 2: to get the stream of the expired keys subscribing to channel# name __keyevent@0__:expired use:## notify-keyspace-events Ex## By default all notifications are disabled because most users don't need# this feature and the feature has some overhead. Note that if you don't# specify at least one of K or E, no events will be delivered.notify-keyspace-events \"\" 三、重启redis ,测试监听事件是否开启 keyevent@*:expired其实是指所有库,可以指定库下标监听16个默认数据库的某一个,比如*keyevent@1指定壹号库。打开redisclientA**,PSUBSCRIBE指令订阅事件。 12345127.0.0.1:6379> PSUBSCRIBE __keyevent@*__:expiredReading messages... 
(press Ctrl-C to quit)1) \"psubscribe\"2) \"__keyevent@*__:expired\"3) (integer) 1 再开启另一个redisclientB,发送过期数据指定事件2秒。 12127.0.0.1:6379> setex test 2 2OK redisclientA就会监听到redisclientB过期key 123456789127.0.0.1:6379> PSUBSCRIBE __keyevent@*__:expiredReading messages... (press Ctrl-C to quit)1) \"psubscribe\"2) \"__keyevent@*__:expired\"3) (integer) 11) \"pmessage\"2) \"__keyevent@*__:expired\"3) \"__keyevent@0__:expired\"4) \"test\" 四、监听器配置和实现 KeyExpirationEvent1MessageListener可参考org.springframework.data.redis.listener.KeyExpirationEventMessageListener源码实现的(默认订阅的是keyevent@*:expired),而我们目标是监听壹号库。因为0号库给限流和oauth2用了,里面存在很多短期key,会监听许多不相干的业务key缓存。此外,不能给KeyExpirationEvent1MessageListener加上@Component,因为存在bean循环依赖问题,可以通过SpringContextHolder解决。 123456789101112131415161718192021222324252627282930313233343536373839404142@Slf4jpublic class KeyExpirationEvent1MessageListener extends KeyExpirationEventMessageListener { private static final Topic KEYEVENT1_EXPIRED_TOPIC = new PatternTopic(\"__keyevent@1__:expired\"); /** * @param listenerContainer must not be {@literal null}. 
*/ public KeyExpirationEvent1MessageListener(RedisMessageListenerContainer listenerContainer) { super(listenerContainer); } @Override public void doRegister(RedisMessageListenerContainer listenerContainer) { listenerContainer.addMessageListener(this, KEYEVENT1_EXPIRED_TOPIC); } @Override public void onMessage(Message message, byte[] pattern) { //获取过期的key String expireKey = message.toString(); //设置监听频道 if (expireKey.startsWith(RedisConstant.NOTIFY_RECEIVE)) { log.info(\"过期的键值对的消息ID:\" + expireKey); log.info(\"消息监听频道topic:\" + new String(message.getChannel()));//获取消息发送id,通过 String sendId = expireKey.substring(RedisConstant.NOTIFY_RECEIVE.length()); SysNotifySendService sysNotifySendService = SpringContextHolder.getBean(SysNotifySendService.class);//common服务提供websocket发送远程接口RemoteCommonService RemoteCommonService remoteCommonService = SpringContextHolder.getBean(RemoteCommonService.class); SysNotifyReceiveService receiveService = SpringContextHolder.getBean(SysNotifyReceiveService.class); SysNotifySend sysNotifySend = sysNotifySendService.getOne(Wrappers.<SysNotifySend>lambdaQuery().eq(SysNotifySend::getSendId, sendId)); com.gdjs.gold.admin.api.dto.Message websocketMsg = new com.gdjs.gold.admin.api.dto.Message(); websocketMsg.setSendId(sysNotifySend.getSendId()); websocketMsg.setFrom(sysNotifySend.getSendUserId()); websocketMsg.setDestination(ThemeEnum.BUSINESS.getTheme()); websocketMsg.setMessage(sysNotifySend.getContent()); remoteCommonService.sendMessage(websocketMsg); } }} 123456789101112131415@Configurationpublic class RedisListenerConfig { @Bean public RedisMessageListenerContainer container(RedisConnectionFactory connectionFactory, ApplicationContext context) { RedisMessageListenerContainer container = new RedisMessageListenerContainer(); container.setConnectionFactory(connectionFactory);//通过new监听器,并且往RedisMessageListenerContainer注册监听器 KeyExpirationEvent1MessageListener listener = new KeyExpirationEvent1MessageListener(container); 
listener.doRegister(container); listener.setApplicationEventPublisher(context); return container; }} 五、发送时候如何自定义切换指定redis库下标SpringBoot 1.X之前的版本123JedisConnectionFactory jedisConnectionFactory = (JedisConnectionFactory) stringRedisTemplate.getConnectionFactory();jedisConnectionFactory.setDatabase(切换到指定的db上);stringRedisTemplate.setConnectionFactory(jedisConnectionFactory); SpringBoot 2.X之后的版本,RedisConnectionFactory动态切换库必须是LettuceConnectionFactory,必须是配置RedisTemplate时候指定,下面看源码。 1234567891011121314151617public class RedisUtil { /** * 切换redis数据库 * * @param redisTemplate springboot封装的redis对象 * @param index 数据库下标 */ public static void select(RedisTemplate redisTemplate, int index) { LettuceConnectionFactory lettuceConnectionFactory = (LettuceConnectionFactory) redisTemplate.getConnectionFactory(); if (lettuceConnectionFactory != null) { lettuceConnectionFactory.setDatabase(index); redisTemplate.setConnectionFactory(lettuceConnectionFactory); lettuceConnectionFactory.resetConnection(); } }} 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051/** * @author caochikai * @date 2019/7/12 * Redis 配置类 */@EnableCaching@Configuration@AllArgsConstructor@AutoConfigureBefore(RedisAutoConfiguration.class)public class RedisTemplateConfig {//@AllArgsConstructor是构造器注入进来LettuceConnectionFactory private final LettuceConnectionFactory lcfactory; @Bean public RedisTemplate<String, Object> redisTemplate() { // 关闭共享链接,动态切换的重点在这里 **lcfactory.setShareNativeConnection(false);** RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>(); redisTemplate.setKeySerializer(new StringRedisSerializer()); redisTemplate.setHashKeySerializer(new StringRedisSerializer()); redisTemplate.setValueSerializer(new JdkSerializationRedisSerializer()); redisTemplate.setHashValueSerializer(new JdkSerializationRedisSerializer()); redisTemplate.setConnectionFactory(lcfactory); return redisTemplate; } @Bean public HashOperations<String, String, Object> 
hashOperations(RedisTemplate<String, Object> redisTemplate) { return redisTemplate.opsForHash(); } @Bean public ValueOperations<String, String> valueOperations(RedisTemplate<String, String> redisTemplate) { return redisTemplate.opsForValue(); } @Bean public ListOperations<String, Object> listOperations(RedisTemplate<String, Object> redisTemplate) { return redisTemplate.opsForList(); } @Bean public SetOperations<String, Object> setOperations(RedisTemplate<String, Object> redisTemplate) { return redisTemplate.opsForSet(); } @Bean public ZSetOperations<String, Object> zSetOperations(RedisTemplate<String, Object> redisTemplate) { return redisTemplate.opsForZSet(); }} 六、设置缓存时候技巧 key需要指定key命名规则前缀RedisConstant.NOTIFY_RECEIVE常量(通常取表名),随后加上表的主键。时间计算间距小技巧,博主业务使用的是LocalDateTime,Duration.between(LocalDateTime.now(), 指定发送时间).getSeconds(),过期时间单位使用的是TimeUnit枚举。切换到指定一号库后,记得切换回来零号库,减少对其他业务的影响 12345678910111213/** * redis缓存过期监听 * * @param sysNotifySend 定时发送消息 * @param message 消息内容 */ private void redisSetMsgKey(SysNotifySend sysNotifySend, Message message) { LocalDateTime sendTime = sysNotifySend.getSendTime(); String jsonString = JSONUtil.parseObj(message).toJSONString(0); RedisUtil.select(redisTemplate, 1); redisTemplate.opsForValue().set(RedisConstant.NOTIFY_RECEIVE + sysNotifySend.getSendId(), jsonString, Duration.between(LocalDateTime.now(), sendTime).getSeconds(), TimeUnit.SECONDS); RedisUtil.select(redisTemplate, 0); } 七、参考文章如下: SpringBoot2.0以上整合redis根据下标动态切换数据库 redis缓存过期策略,监听redis缓存","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"Thread分析","date":"2020-03-24T10:38:39.000Z","path":"2020/03/24/Thread分析/","text":"Thread分析一、多线程实现方式 继承Thread 实现Runable 使用FutureTask 使用Executor框架 二、Thread start方法源代码分析123456789101112131415161718192021222324252627282930313233343536/****start方法调用了start0方法,start0方法在JVM中,start0中的逻辑会调用run方法**。***一旦线程开始执行,jvm就会调用run方法;** ***线程只能启动一次,结束后无法重启。** * @exception IllegalThreadStateException if the 
thread was already * started. * @see #run() * @see #stop() */ public synchronized void start() { /** * 0代表线程状态为NEW * A zero status value corresponds to state \"NEW\". */ if (threadStatus != 0) throw new IllegalThreadStateException(); /* 通知组该线程即将开始将其添加到组的线程列表*中,并且该组的未启动计数可以减少。/ group.add(this); boolean started = false; try { start0(); started = true; } finally { try { if (!started) {//如果创建失败,线程数组将会删除该线程,未启动计数增加,具体看源码 group.threadStartFailed(this); } } catch (Throwable ignore) { /* 什么都不用做,直接被上层堆栈回调 */ } } }//start0是一个native方法 private native void start0(); 三、线程状态分析(6种)123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566/**在同一时间只有一种状态,只能反映JVM状态却不能反映出系统状态。 * * @since 1.5 * @see #getState */ public enum State { /****线程还未开始,刚刚新建。** * Thread state for a thread which has not yet started. */ NEW, /****可运行状态,正在Java虚拟机中执行,但可能正在等待来自操作系统的其他资源,例如处理器。** * Thread state for a runnable thread. A thread in the runnable * state is executing in the Java virtual machine but it may * be waiting for other resources from the operating system * such as processor. */ RUNNABLE, /****阻塞状态。等待锁资源进入同步代码块。** * Thread state for a thread blocked waiting for a monitor lock. * A thread in the blocked state is waiting for a monitor lock * to enter a synchronized block/method or * reenter a synchronized block/method after calling * {@link Object#wait() Object.wait}. */ BLOCKED, /****等待状态。可能因为如下方法进入该状态:** * Thread state for a waiting thread. * A thread is in the waiting state due to calling one of the * following methods: * <ul> * <li>{@link Object#wait() Object.wait} with no timeout</li> * <li>{@link #join() Thread.join} with no timeout</li> * <li>{@link LockSupport#park() LockSupport.park}</li> * </ul> *下面例子很形象 * For example, a thread that has called <tt>Object.wait()</tt> * on an object is waiting for another thread to call * <tt>Object.notify()</tt> or <tt>Object.notifyAll()</tt> on * that object. 
A thread that has called <tt>Thread.join()</tt> * is waiting for a specified thread to terminate. */ WAITING, /****等待一定的时间状态。可能因为如下方法进入该状态:** * Thread state for a waiting thread with a specified waiting time. * A thread is in the timed waiting state due to calling one of * the following methods with a specified positive waiting time: * <ul> * <li>{@link #sleep Thread.sleep}</li> * <li>{@link Object#wait(long) Object.wait} with timeout</li> * <li>{@link #join(long) Thread.join} with timeout</li> * <li>{@link LockSupport#parkNanos LockSupport.parkNanos}</li> * <li>{@link LockSupport#parkUntil LockSupport.parkUntil}</li> * </ul> */ TIMED_WAITING, /****结束状态。** * Thread state for a terminated thread. * The thread has completed execution. */ TERMINATED; } 四、最后Thread Run方法分析(模版方法模式) Runnable的实现对象通过构造函数传入Thread。 123public Thread(Runnable target) { init(null, target, \"Thread-\" + nextThreadNum(), 0);} Runnable实现作为target对象传递进来。再次调用了init方法。Thread的target被设置为你实现业务逻辑的Runnable实现。 1234567private void init(ThreadGroup g, Runnable target, String name, long stackSize, AccessControlContext acc, boolean inheritThreadLocals){...省略其他过程 this.target = target;} 当你传入了target,则会执行target的run方法。 @Override public void run() { if (target != null) { target.run(); } }","tags":[]},{"title":"数据结构之稀疏数组","date":"2020-03-21T08:48:00.000Z","path":"2020/03/21/数据结构之稀疏数组/","text":"数据结构之稀疏数组一、简介 稀疏数组可以简单的看作为是压缩,在开发中也会使用到。比如将数据序列化到磁盘上,减少数据量,在IO过程中提高效率等等。 二、压缩和恢复——稀疏数组1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889/** * @Description: 稀疏数组 ---> 数组中有很多无效数据,压缩数据 * @Author: Caochikai */public class SparseArrayMy { /** * 先初始化11 X 11 的二维数组代表棋盘,转化为稀疏数组的思路: * 1、先遍历二维数组 得到非0数据的有效个数sum * 2、根据sum有效个数创建对应的稀疏数组int[sum+1][3] * 3、一次读取有效A的位置x,y和值,存进稀疏数组 * * @param args */ public static void main(String[] args) { // 创建一个原始的二维数组 11 * 11 // 0: 表示没有棋子, 1 表示 黑子 2 表蓝子 int chessArr1[][] = new int[11][11]; 
chessArr1[1][2] = 1; chessArr1[2][3] = 2; chessArr1[4][5] = 2; // 输出原始的二维数组 System.out.println(\"原始的二维数组~~\"); for (int[] row : chessArr1) { for (int data : row) { System.out.printf(\"%d\\t\", data); } System.out.println(); } // 1、先遍历二维数组 得到非0数据的有效个数sum int sum = 0; for (int i = 0; i < 11; i++) { for (int j = 0; j < 11; j++) { if (chessArr1[i][j] != 0) { sum++; } } } System.out.println(\"二维数组的有效个数sum = \" + sum); //2、根据sum有效个数创建对应的稀疏数组int[sum+1][3] int sparseArr[][] = new int[sum + 1][3]; //第一行按稀疏数组定义记录总行数、总列数、个数总数 sparseArr[0][0] = 11; sparseArr[0][1] = 11; sparseArr[0][2] = sum; // 3、一次读取有效A的位置x,y和值,存进稀疏数组 int index = 0; for (int i = 0; i < 11; i++) { for (int j = 0; j < 11; j++) { if (chessArr1[i][j] != 0) { index++; sparseArr[index][0] = i; sparseArr[index][1] = j; sparseArr[index][2] = chessArr1[i][j]; } } } for (int[] ints : sparseArr) { for (int anInt : ints) { System.out.printf(\"%d\\t\", anInt); } System.out.println(); } /* * 将稀疏数组恢复成二维数组思路: * 1、更具第一行的x,y创建二维数组 * 2、遍历1到sum行,取得A有效数值的对应x,y,将值A存进对应二维数组位置 * */ int[][] oldArray = new int[sparseArr[0][0]][sparseArr[0][1]]; for (int i = 1; i <= sparseArr[0][2]; i++) { int x = sparseArr[i][0]; int y = sparseArr[i][1]; oldArray[x][y] = sparseArr[i][2]; } // 输出恢复后的二维数组 System.out.println(); System.out.println(\"恢复后的二维数组\"); for (int[] ints : oldArray) { for (int anInt : ints) { System.out.printf(\"%d\\t\", anInt); } System.out.println(); } }} 三、输出结果1234567891011121314151617181920212223242526272829303132原始的二维数组~~0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 二维数组的有效个数sum = 311 11 3 1 2 1 2 3 2 4 5 2 恢复后的二维数组0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 Process finished with exit code 0","tags":[]},{"title":"Web协议详解与抓包实战简记","date":"2019-12-31T08:45:34.000Z","path":"2019/12/31/Web协议详解与抓包实战简记/","text":"notion链接——Web协议详解简记","tags":[]},{"title":"Sublime 配置记录","date":"2019-12-29T03:07:00.000Z","path":"2019/12/29/Sublime-配置记录/","text":"Sublime 配置 多行批量修改数据的必备工具,也是前端最喜欢的文本编辑器之一。下面快捷键配置推荐收藏。 [SFTP 插件] 配置文件 Demo12345678910111213141516{ \"host\": \"jinghanco.com\", \"user\": \"root\", \"password\": \"***\", \"port\": \"21\", \"remote_path\": \"/jinghan_web/...\", \"ignore_regexes\": [ \"\\\\.sublime-(project|workspace)\", \"sftp-config(-alt\\\\d?)?\\\\.json\", \"sftp-settings\\\\.json\", \"/venv/\", \"\\\\.svn/\", \"\\\\.hg/\", \"\\\\.git/\", \"\\\\.bzr\", \"_darcs\", \"CVS\", \"\\\\.DS_Store\", \"Thumbs\\\\.db\", \"desktop\\\\.ini\", \"/Runtime/\", \"\\\\/Upload\", \"\\\\.backup\", \"\\\\.save\", \"\\\\.copy\", \"\\\\.test.php\", \".gitignore\", \"index.php\" ],} [Sublime] 用户配置123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869{ \"always_show_minimap_viewport\": false, \"auto_complete_commit_on_tab\": false, \"auto_find_in_selection\": true, \"auto_match_enabled\": true, \"bold_folder_labels\": true, \"caret_style\": \"phase\", \"close_windows_when_empty\": false, // \"color_scheme\": \"Packages/Color Scheme - Default/Monokai.tmTheme\", \"default_encoding\": \"UTF-8\", \"default_line_ending\": \"unix\", \"detect_indentation\": true, \"drag_text\": true, \"draw_minimap_border\": true, \"ensure_newline_at_eof_on_save\": true, \"fallback_encoding\": \"UTF-8\", \"font_family\": \"consolas\", \"font_size\": 11, \"highlight_line\": true, \"highlight_modified_tabs\": true, \"ignored_packages\": [ \"ColorPicker\", \"ConvertToUTF8\", \"CSS Extended Completions\", \"DocBlockr\", \"git\", \"Makefile\", \"Markdown Preview\", \"MarkdownEditing\", \"Phpcs\", \"Quick File Move\", \"SideBarEnhancements\", \"Vintage\", \"Vintageous\" ], 
\"indent_guide_options\": [ \"draw_active\", \"draw_normal\" ], \"indent_to_bracket\": true, \"line_padding_bottom\": 1, \"line_padding_top\": 1, \"match_brackets\": true, \"match_brackets_angle\": true, \"match_brackets_braces\": false, \"match_tags\": true, \"preview_on_click\": true, \"rulers\": [ 80, 120 ], \"scroll_past_end\": true, \"shift_tab_unindent\": true, \"show_encoding\": true, \"show_line_endings\": true, \"show_tab_close_buttons\": false, \"tab_size\": 4, // \"theme\": \"SoDaReloaded Dark.sublime-theme\", \"translate_spaces_to_tabs\": false, \"translate_tabs_to_spaces\": true, \"tree_animation_enabled\": true, \"trim_automatic_white_space\": true, \"trim_trailing_white_space_on_save\": true, \"word_wrap\": false, \"wrap_width\": 0} [Sublime] 快捷键配置1234567891011121314151617181920212223242526[ // 保存所有文件 { \"keys\": [\"ctrl+shift+s\"], \"command\": \"save_all\" }, // 下一个视图 { \"keys\": [\"ctrl+tab\"], \"command\": \"next_view\" }, // 上一个视图 { \"keys\": [\"ctrl+shift+tab\"], \"command\": \"prev_view\" }, // 变为大写 { \"keys\": [\"alt+a\"], \"command\": \"upper_case\" }, // 变为小写 { \"keys\": [\"alt+s\"], \"command\": \"lower_case\" }, // 复制行 { \"keys\": [\"ctrl+shift+d\"], \"command\": \"duplicate_line\" }, // 选择下一个相同的被选字符 { \"keys\": [\"ctrl+d\"], \"command\": \"find_under_expand\" }, // 跳过选择 { \"keys\": [\"ctrl+k\", \"ctrl+d\"], \"command\": \"find_under_expand_skip\" }, // 显示隐藏 { \"keys\": [\"alt+x\"/*, \"alt+z\"*/], \"command\": \"toggle_side_bar\" }, // 快速打开hosts文件 { \"keys\": [\"ctrl+alt+h\"], \"command\": \"prompt_open_file C:\\\\Windows\\\\System32\\\\drivers\\\\etc\\\\hosts\"}, { \"keys\": [\"ctrl+alt+j\"], \"command\": \"js_format\", \"context\": [{\"key\": \"selector\", \"operator\": \"equal\", \"operand\": \"source.js,source.json\"}] }]","tags":[{"name":"tool","slug":"tool","permalink":"https://caochikai.github.io/tags/tool/"}]},{"title":"maven公共仓库推荐配置","date":"2019-12-28T09:34:00.000Z","path":"2019/12/28/maven公共仓库推荐配置/","text":"maven公共仓库推荐配置 
maven公共仓库毕竟是在国外,所以使用maven默认配置仓库地址,下载网络可能就几十KB。推荐在公司里面搭建一个nexus私服仓库,因为有些jar随着时间变化(特别是老项目),maven公服都会丢失,最惨的是莫过于在GitHub项目都没有了。下面是我个人对关于maven仓库配置的一点不成熟建议,为了配合IDEA和命令行,idea已经自带maven,且maven与idea版本做了适配的,通常情况下不建议单独另外下载maven;web项目通常分为三种,第一种常见的maven项目(pom.xml),第二种普通的WebRoot项目,第三种gradle(强大,题外话)。如果贵司有私服那建议maven项目,没有那就推荐WebRoot集成第三方库web-inf/lib。毕竟随着时间流逝,很多第三方库会被淘汰或者维护开发者已经消失在业界。 在onedriver或者云盘里,放置自己常用的开发工具,maven有个config文件夹,下面有个setting.xml,里面有 个mirror的标签,替换掉,IDE环境需要在idea配置maven指定setting地址(建议走下面默认配置,无需改动),项目的部分依赖包在webapp/web-inf/lib下! 默认推荐:在win系统开发环境下,本地仓库默认在C:\\Users\\cao.m2下repository,建议配置在固态硬盘,把下面推荐mirrors标签配置替换maven/config/settings.xml后直接放置在C:\\Users\\cao.m2(系统当前用户目录)。 123456789101112131415161718192021222324252627282930313233343536373839404142434445<mirrors> <mirror> <id>alimaven</id> <name>aliyun maven</name> <url><http://maven.aliyun.com/nexus/content/groups/public/></url> <mirrorOf>central</mirrorOf> </mirror> <mirror> <id>central</id> <name>Maven Repository Switchboard</name> <url><http://repo1.maven.org/maven2/></url> <mirrorOf>central</mirrorOf> </mirror> <mirror> <id>repo2</id> <mirrorOf>central</mirrorOf> <name>Human Readable Name for this Mirror.</name> <url><http://repo2.maven.org/maven2/></url> </mirror> <mirror> <id>ibiblio</id> <mirrorOf>central</mirrorOf> <name>Human Readable Name for this Mirror.</name> <url><http://mirrors.ibiblio.org/pub/mirrors/maven2/></url> </mirror> <mirror> <id>jboss-public-repository-group</id> <mirrorOf>central</mirrorOf> <name>JBoss Public Repository Group</name> <url><http://repository.jboss.org/nexus/content/groups/public></url> </mirror> <mirror> <id>google</id> <name>google maven</name> <url><https://maven.google.com/></url> <mirrorOf>central</mirrorOf> </mirror> <!-- 中央仓库在中国的镜像 --> <mirror> <id>maven.net.cn</id> <name>oneof the central mirrors in china</name> <url><http://maven.net.cn/content/groups/public/></url> <mirrorOf>central</mirrorOf> 
</mirror></mirrors>","tags":[{"name":"maven","slug":"maven","permalink":"https://caochikai.github.io/tags/maven/"}]},{"title":"Mybatis技术内幕源码解析:缓存模块","date":"2019-12-27T14:06:00.000Z","path":"2019/12/27/Mybatis技术内幕源码解析:缓存模块/","text":"2.9、缓存模块 在Mybatis中,最常听见的就是mybatis的查询一二级缓存,本质都是org.apache.ibatis.cache缓存模块下Cache的实现。本文主要是对Cache接口以及实现类进行结束,并涉及到了装饰器模式。该接口实现类众多,只有PerpetualCache提供了Cache接口的基本实现,下面分析Cache接口后,接着PerpetualCache(超级简单): 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889package org.apache.ibatis.cache;import java.util.concurrent.locks.ReadWriteLock;/**缓存服务发现 * SPI(service provider interface) for cache providers. * <p>一个缓存实例对应着一个namespace命名空间 * One instance of cache will be created for each namespace. * <p> * The cache implementation must have a constructor that receives the cache id as an String parameter.. */public interface Cache { /**缓存对象的唯一ID * @return The identifier of this cache */ String getId(); /**向缓存里添加数据,key通常是CacheKey,value是查询的结果 * @param key Can be any object but usually it is a {@link CacheKey} * @param value The result of a select. */ void putObject(Object key, Object value); /**根据key获取相应的缓存结果 * @param key The key * @return The object stored in the cache. */ Object getObject(Object key); /**从3.3.0开始在回滚rollback期间缓存的值可能会缓存丢失命中, * 为了防止缓存穿透直接命中数据库,可以往key先放null值(会上锁),设置上后就会释放锁。 * As of 3.3.0 this method is only called during a rollback * for any previous value that was missing in the cache. * This lets any blocking cache to release the lock that * may have previously put on the key. * A blocking cache puts a lock when a value is null * and releases it when the value is back again. * This way other threads will wait for the value to be * available instead of hitting the database. */ Object removeObject(Object key); /**清空缓存 * Clears this cache instance. */ void clear(); /**可选实现,因为mybatis核心根本不会使用该方法 * Optional. 
This method is not called by the core. *返回其元素个数,而不是容器总大小 * @return The number of elements stored in the cache (not its capacity). */ int getSize(); /**可选实现,从 3.2.6版本开始核心方法不再会使用改方法getReadWriteLock * Optional. As of 3.2.6 this method is no longer called by the core. * <p>缓存所需的任何锁都必须由缓存提供内部实现 * Any locking needed by the cache must be provided internally by the cache provider. */ default ReadWriteLock getReadWriteLock() { return null; }}package org.apache.ibatis.cache.impl;import ... public class PerpetualCache implements Cache {//缓存对象的唯一id private final String id;//用于记录缓存的容器 private Map<Object, Object> cache = new HashMap<>(); public PerpetualCache(String id) { this.id = id; }//下面记录所有的方法都来自Map对象的相应方法 @Override public String getId() { return id; } ...} org.apache.ibatis.cache.decorators下是装饰器实现,以BlockingCache举例,支持阻塞版本的缓存装饰器,它通过ReentrantLock保证只有一个线程到数据库中查找指定的key对应的数据。可以从putObject和getObject方法入手,下面在源码里了解其具体实现: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081package org.apache.ibatis.cache.decorators;import .../**简单阻塞版本的缓存装饰器 * Simple blocking decorator *简单而高效版本的EhCache阻塞装饰器,当找不到该缓存就会在key上锁 * Simple and inefficient version of EhCache's BlockingCache decorator. * It sets a lock over a cache key when the element is not found in cache. * This way, other threads will wait until this element is filled instead of hitting the database. 
*这样,其他线程将等待直到该元素被填充,而不是直接命中访问数据库。 */public class BlockingCache implements Cache {//阻塞超时时长 private long timeout;//被装饰的底层对象 private final Cache delegate;//线程安全的ConcurrentHashMap,每个key都有个重入锁ReentrantLock private final ConcurrentHashMap<Object, ReentrantLock> locks;...省略构造器和一些属性geter\\setter//当查出该值放入该key下,就会释放该线程之前持有的锁 @Override public void putObject(Object key, Object value) { try { delegate.putObject(key, value); } finally { releaseLock(key); } } @Override public Object getObject(Object key) {//获取key对应的锁 acquireLock(key);//获取key对应的缓存项 Object value = delegate.getObject(key); if (value != null) {//释放改key持有的锁 releaseLock(key); } return value; } @Override public Object removeObject(Object key) { // 这个方法虽然名字长成这样,但是依然还是只会调用releaseLock方法 releaseLock(key); return null; } private ReentrantLock getLockForKey(Object key) { return locks.computeIfAbsent(key, k -> new ReentrantLock()); } private void acquireLock(Object key) {//如果没有没有锁就创建 Lock lock = getLockForKey(key); if (timeout > 0) { try {//在指定timeout时间内尝试获取锁,false代表超时 boolean acquired = lock.tryLock(timeout, TimeUnit.MILLISECONDS); if (!acquired) { throw new CacheException(\"Couldn't get a lock in \" + timeout + \" for the key \" + key + \" at the cache \" + delegate.getId()); } } catch (InterruptedException e) { throw new CacheException(\"Got interrupted while trying to acquire lock for key \" + key, e); } } else {//获取锁,不带超时时间 lock.lock(); } } private void releaseLock(Object key) {//从缓存锁的集合中获取指定key对应的锁 ReentrantLock lock = locks.get(key);//判断该锁是否被该线程持有 if (lock.isHeldByCurrentThread()) { lock.unlock();//那就释放掉,其他线程才能进行其他操作 } }} FifoCache&LruCache 上面提到那么多Cache接口实现类,有什么用处呢?在很多场景中,比如为了控制缓存的大小,系统需要通过一定策略清理缓存。FifoCache(Fist in,Fist Oout)是先入先出策略,当向缓存添加数据时,通过设置缓存项的个数上限控制进行清理的阈值。LruCache是最近最少使用(Least Recently Used,LRU)策略,在需要清理缓存时,它会清除最近最少使用的缓存项。下面简略介绍一些比较重要的核心代码: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687/**先入先出 
* FIFO (first in, first out) cache decorator. */public class FifoCache implements Cache {//底层被装饰的缓存对象 private final Cache delegate;//通过队列数据结构记录顺序,实际是LinkedList<object>类型的集合对象 private final Deque<Object> keyList;//缓存项的个数上限清理动作的阈值 private int size;...省略setter/getter @Override public void putObject(Object key, Object value) {//检测是否到达阈值,并清理缓存 cycleKeyList(key); delegate.putObject(key, value); } private void cycleKeyList(Object key) {//先记录该缓存 keyList.addLast(key);//判断是达到上限 if (keyList.size() > size) {//清理最老的(排在队头的) Object oldestKey = keyList.removeFirst(); delegate.removeObject(oldestKey); } }}/**最近最少使用 * Lru (least recently used) cache decorator. */public class LruCache implements Cache {//底层被装饰的缓存对象 private final Cache delegate;//实际LinkedHashMap<object,object>类型对象,用于记录key最近的使用情况 private Map<Object, Object> keyMap;//记录最少使用的key private Object eldestKey; public LruCache(Cache delegate) { this.delegate = delegate;//默认大小为1024 setSize(1024); }...省略setter/getter public void setSize(final int size) {//第三个参数控制是否记录其顺序,LinkedHashMap.get方法会改变其顺序(把该节点往最后位置放move node to last) keyMap = new LinkedHashMap<Object, Object>(size, .75F, true) {//重写removeEldestEntry最终会被其put方法调用到 @Override protected boolean removeEldestEntry(Map.Entry<Object, Object> eldest) {//如果达到缓存上限则更新eldestKey boolean tooBig = size() > size; if (tooBig) { eldestKey = eldest.getKey(); } return tooBig; } }; } @Override public void putObject(Object key, Object value) {//添加缓存项后删除最久未使用项 delegate.putObject(key, value); cycleKeyList(key); } @Override public Object getObject(Object key) { keyMap.get(key); //touch触发LinkedHashMap记录排序 return delegate.getObject(key); } private void cycleKeyList(Object key) { keyMap.put(key, key);//eldestKey不是空代表到了上限,那就执行删除removeObject if (eldestKey != null) { delegate.removeObject(eldestKey); eldestKey = null; } }} SoftCache&WeakCache 这两个缓存实现主要应用场景在不重要且方便GC的需求下,这就不得不提及java的四种引用类型。下面引用从强到弱依次排列介绍: 强引用(Strong Reference):最常见就是new对象被强引用,GC是不能绝不会回收该类型对象,当然引用类型会变化,并非一成不变。 软引用(Soft 
Reference):当JVM内存不足时候,软引用指向的对象会被释放,需要通过Reference.get方法返回值,判断是否对象是否存活。 引用队列(ReferenceQueue):在创建 SoftReference对象时,当被GC回收时,JVM就会将该SoftReference对象添加到关联的引用队列中。当需要检测时,就可以从引用队列中获取这些SoftReference对象。可参考java.util.WeakHashMap的代码。 弱引用(Weak Reference):在发生GC检查到该对象的引用类型全是weak reference,就会被回收;参考java.util.WeakHashMap。 幽灵引用(Phantom Reference):小名(虚引用),必须指定引用队列才能使用,当GC发生回收该引用对象会通知到该队列,作为Object.finalize()被废弃后的代替解决方案。1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889/** * Soft Reference cache decorator */public class SoftCache implements Cache {//该队列的缓存对象属于都有强引用存在,代表该部分缓存不会被回收 private final Deque<Object> hardLinksToAvoidGarbageCollection;//ReferenceQueue引用队列,记录被GC回收的缓存项(SoftEntry对象) private final ReferenceQueue<Object> queueOfGarbageCollectedEntries; private final Cache delegate;//强引用的数量,默认是256 private int numberOfHardLinks; public SoftCache(Cache delegate) { this.delegate = delegate;//初始化成员 this.numberOfHardLinks = 256; this.hardLinksToAvoidGarbageCollection = new LinkedList<>(); this.queueOfGarbageCollectedEntries = new ReferenceQueue<>(); }...省略setter/getter和简单方法 @Override public void putObject(Object key, Object value) {//清除已被GC的缓存 removeGarbageCollectedItems();//添加新的缓存 delegate.putObject(key, new SoftEntry(key, value, queueOfGarbageCollectedEntries)); } @Override public Object getObject(Object key) { Object result = null; @SuppressWarnings(\"unchecked\") // assumed delegate cache is totally managed by this cache SoftReference<Object> softReference = (SoftReference<Object>) delegate.getObject(key);//检查是否有缓存项 if (softReference != null) { result = softReference.get();//已经被GC回收了 if (result == null) {//从缓存也删除,与GC同步 delegate.removeObject(key); } else { //修改不仅仅需要读锁(那就是写锁也要) See #586 (and #335) modifications need more than a read lock synchronized (hardLinksToAvoidGarbageCollection) {//重复获取证明属于强引用,记录到hardLinksToAvoidGarbageCollection 
hardLinksToAvoidGarbageCollection.addFirst(result);//类似先进先出,到达阈值清空老的 if (hardLinksToAvoidGarbageCollection.size() > numberOfHardLinks) { hardLinksToAvoidGarbageCollection.removeLast(); } } } } return result; } @Override public void clear() {//先清空强引用该集合 synchronized (hardLinksToAvoidGarbageCollection) { hardLinksToAvoidGarbageCollection.clear(); }//再清空被GC回收的缓存项 removeGarbageCollectedItems();//清理底层缓存 delegate.clear(); } private void removeGarbageCollectedItems() { SoftEntry sv;//遍历queueOfGarbageCollectedEntries集合 while ((sv = (SoftEntry) queueOfGarbageCollectedEntries.poll()) != null) {//将已被GC的缓存也清除了(与GC同步) delegate.removeObject(sv.key); } } private static class SoftEntry extends SoftReference<Object> { private final Object key; SoftEntry(Object key, Object value, ReferenceQueue<Object> garbageCollectionQueue) {//指向value的是软引用,且关联了队列garbageCollectionQueue super(value, garbageCollectionQueue);//强引用 this.key = key; } }} ScheduledCache&LoggingCache&SynchronizedCache&CacheSerializedCache 最后提一下剩下的几个缓存装饰器,ScheduledCache看名字就知道是周期计划行清空缓存,默认是一小时;LoggingCache是带日志功能的缓存,主要用来统计缓存命中次数和访问次数,以及输出指定的格式;SynchronizedCache添加了同步功能,通过在每个方法上添加synchronized;CacheSerializedCache提供添加针对value进行序列化后byte[]存入缓存,而获取就对byte[]进行反序列化(一个全新对象),使用的是jdk原生序列化。 2.9.3 CacheKey 这个key有点复杂啊,key可能不仅仅是个String,通过CacheKey就添加多个对象updateList影响缓存项key的唯一性。updateList包含MappedStatement的id、指定查询结果集的范围——RowBounds.offset和RowBounds.limit、查询所使用的SQL语句、该SQL语句的实际参数值。 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374package org.apache.ibatis.cache;import ...public class CacheKey implements Cloneable, Serializable { private static final int DEFAULT_MULTIPLIER = 37; private static final int DEFAULT_HASHCODE = 17;//参与计算的hashcode,默认是DEFAULT_MULTIPLIER private final int multiplier;//CacheKey对象的hashcode,默认是DEFAULT_HASHCODE private int hashcode;//校验和 private long checksum;//updateList集合个数 private int count;//由该集合中的所有对象共同决定两个CacheKey是否相同 private 
List<Object> updateList; public CacheKey() { this.hashcode = DEFAULT_HASHCODE; this.multiplier = DEFAULT_MULTIPLIER; this.count = 0; this.updateList = new ArrayList<>(); }...省略setter/getter和简单方法 public void update(Object object) {//根据不同类型获取其baseHashCode int baseHashCode = object == null ? 1 : ArrayUtil.hashCode(object); count++; checksum += baseHashCode; baseHashCode *= count;//重新计算hashcode hashcode = multiplier * hashcode + baseHashCode; updateList.add(object); } public void updateAll(Object[] objects) { for (Object o : objects) { update(o); } }//重写equals @Override public boolean equals(Object object) { if (this == object) { return true; } if (!(object instanceof CacheKey)) { return false; } final CacheKey cacheKey = (CacheKey) object;//比较hashcode、checksum、count if (hashcode != cacheKey.hashcode) { return false; } if (checksum != cacheKey.checksum) { return false; } if (count != cacheKey.count) { return false; }//比较updateList每一项 for (int i = 0; i < updateList.size(); i++) { Object thisObject = updateList.get(i); Object thatObject = cacheKey.updateList.get(i); if (!ArrayUtil.equals(thisObject, thatObject)) { return false; } } return true; }} ```","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"Mybatis技术内幕源码解析:参数和结果集处理","date":"2019-12-26T10:28:00.000Z","path":"2019/12/26/Mybatis技术内幕源码解析:参数和结果集处理/","text":"2.8、binding 模块 org.apache.ibatis.binding模块是为了解决Mapper类接口和对应Xml配置文件之间映射,通常Mapper接口定义了SQL语句对应的方法,而xml里面配置了对应的SQL语句,所以在Mybatis初始化的时候编译器会检查配置Mapper和xml,并关联起来。特别在参数处理那块非常复杂,需要给位看官的耐心分析,下面就要先从核心组件关系入手: upload successful 2.8.1 MapperRegistry&MapperProxyFactory XXXRegistry看后缀命名风格就知道又是个注册类,MapperRegistry是Mapper接口及其对应的代理对象工厂的注册中心。Configuration是MyBatis 全局性的配置对象,在MyBatis初始化的过程中,所有配置信息会被解析成相应的对象并记录到Configuration对象中,后面介绍MyBatis初始化过程时会详细介绍Configuration。我重点要关注Configuration.mapperRegistry属性,它记录当前使用的MapperRegistry对象,下面就让就进行源码导读: 
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106package org.apache.ibatis.binding;import ...public class MapperRegistry {//Configuration对象全局唯一的配置对象,包含所有的配置信息 private final Configuration config;//记录Mapper接口与MapperProxyFactory之间的对应关系,//key为Mapper接口Class,value对应的代理工厂对象MapperProxyFactory private final Map<Class<?>, MapperProxyFactory<?>> knownMappers = new HashMap<>(); public MapperRegistry(Configuration config) { this.config = config; }//通过Class type获取MapperProxyFactory工厂对象 public <T> T getMapper(Class<T> type, SqlSession sqlSession) { final MapperProxyFactory<T> mapperProxyFactory = (MapperProxyFactory<T>) knownMappers.get(type); if (mapperProxyFactory == null) { throw new BindingException(\"Type \" + type + \" is not known to the MapperRegistry.\"); } try {//传入sqlSession为Mapper接口创建jdk动态代理对象,下面会详细分析mapperProxyFactory创建过程 return mapperProxyFactory.newInstance(sqlSession); } catch (Exception e) { throw new BindingException(\"Error getting mapper instance. Cause: \" + e, e); } }//Class类型判断knownMappers是否存在该Mapper接口 public <T> boolean hasMapper(Class<T> type) { return knownMappers.containsKey(type); }//加载完配置文件和类上注解,将解析的mapper接口通过addMapper方法添加到MapperRegistry.knownMappers, public <T> void addMapper(Class<T> type) {//判断是否是接口 if (type.isInterface()) {//检查是否已经存在,避免重复读取 if (hasMapper(type)) { throw new BindingException(\"Type \" + type + \" is already known to the MapperRegistry.\"); }//标志是否完成load加载 boolean loadCompleted = false; try {//将Mapper接口对应的Class对象和MapperproxyFactory对象添加到knownMappers集合 knownMappers.put(type, new MapperProxyFactory<>(type)); // It's important that the type is added before the parser is run // otherwise the binding may automatically be attempted by the // mapper parser. 
If the type is already known, it won't try.//这个涉及到xml解析和注解方面的处理,后面再详细解答 MapperAnnotationBuilder parser = new MapperAnnotationBuilder(config, type); parser.parse(); loadCompleted = true; } finally { if (!loadCompleted) { knownMappers.remove(type); } } } } /**通过knownMappers获取Class集合,该集合为不可变容器unmodifiableCollection,避免被修改只能被读取 * @since 3.2.2 */ public Collection<Class<?>> getMappers() { return Collections.unmodifiableCollection(knownMappers.keySet()); } /**通过包名路径读取指定superType父类的mapper(包扫描) * @since 3.2.2 */ public void addMappers(String packageName, Class<?> superType) { ResolverUtil<Class<?>> resolverUtil = new ResolverUtil<>(); resolverUtil.find(new ResolverUtil.IsA(superType), packageName); Set<Class<? extends Class<?>>> mapperSet = resolverUtil.getClasses(); for (Class<?> mapperClass : mapperSet) { addMapper(mapperClass); } } //...addMappers重载}package org.apache.ibatis.binding;import ...public class MapperProxyFactory<T> {//当前MapperProxyFactory工厂对应的产品mapperInterface接口//换而言之就是一个工厂只能生产一种产品 private final Class<T> mapperInterface;//该mapperInterface接口下对应的方法集合,key为java.lang.reflect.Method,value为MapperMethod对象 private final Map<Method, MapperMethod> methodCache = new ConcurrentHashMap<>();//...geter/setter方法和构造器 @SuppressWarnings(\"unchecked\") protected T newInstance(MapperProxy<T> mapperProxy) { return (T) Proxy.newProxyInstance(mapperInterface.getClassLoader(), new Class[] { mapperInterface }, mapperProxy); }//创建实现了mapperInterface接口的代理对象 public T newInstance(SqlSession sqlSession) { final MapperProxy<T> mapperProxy = new MapperProxy<>(sqlSession, mapperInterface, methodCache); return newInstance(mapperProxy); }} 2.8.2 MapperProxy代理 MapperProxy实现了InvocationHandler接口,接口实现方法就是JDK动态代理的核心方法逻辑,下面认真分析针对mapper代理逻辑: 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950package org.apache.ibatis.binding;import ...public class MapperProxy<T> implements InvocationHandler, Serializable {//java.lang.invoke.MethodHandles.Lookup是被允许访问的成员类型(访问权限修饰符) 
private static final int ALLOWED_MODES = MethodHandles.Lookup.PRIVATE | MethodHandles.Lookup.PROTECTED | MethodHandles.Lookup.PACKAGE | MethodHandles.Lookup.PUBLIC;//动态代理Constructor private static final Constructor<Lookup> lookupConstructor;//私有访问类型的方法 private static final Method privateLookupInMethod;//全局引用的sqlSession对象 private final SqlSession sqlSession;//接口对应的Class对象 private final Class<T> mapperInterface;//methodCache缓存方法集合key为Method对象,value为对应的MapperMethod private final Map<Method, MapperMethod> methodCache;//...省略构造器 @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { try {//如果目标方法继承自object,则直接调用目标方法 if (Object.class.equals(method.getDeclaringClass())) { return method.invoke(this, args); } else if (method.isDefault()) {//是否的默认方法(public non-abstract) if (privateLookupInMethod == null) {//针对jdk8版本默认方法处理 return invokeDefaultMethodJava8(proxy, method, args); } else {//针对jdk9版本默认方法处理 return invokeDefaultMethodJava9(proxy, method, args); } } } catch (Throwable t) { throw ExceptionUtil.unwrapThrowable(t); }//优先在缓存中获取,如果没有就new一个,具体看下面cachedMapperMethod方法 final MapperMethod mapperMethod = cachedMapperMethod(method); return mapperMethod.execute(sqlSession, args); }//新版本的mybatis private MapperMethod cachedMapperMethod(Method method) { return methodCache.computeIfAbsent(method, k -> new MapperMethod(mapperInterface, method, sqlSession.getConfiguration())); }//...省略方法invokeDefaultMethodJava8和invokeDefaultMethodJava9} 2.8.3 MapperMethod MapperMethod封装了接口中对应的方法的元信息,以及对应的SQL语句,与开发者息息相关。另一个比较重要的SqlCommand是MapperMethod中定义的内部类,它使用name字段记录了SQL语句的名称,使用type字段(SqlCommandType类型)记录了SQL语句的类型。SqlCommandType是枚举类型,有效取值为UNKNOWN、INSERT、UPDATE、DELETE、SELECT、FLUSH。MapperMethod源码比较复杂,先分析MapperMethod字段信息,后面会拆分SqlCommand、MethodSignature和ParamNameResolver分析: 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273declaringClasspackage org.apache.ibatis.binding;import 
...public class MapperMethod {//SqlCommand记录SQL语句的名称和类型 private final SqlCommand command;//Mapper接口中的方法签名和相关信息,MethodSignature也是MapperMethod的内部类 private final MethodSignature method; public MapperMethod(Class<?> mapperInterface, Method method, Configuration config) { this.command = new SqlCommand(config, mapperInterface, method); this.method = new MethodSignature(config, mapperInterface, method); } public static class SqlCommand {//SQL语句的名称 private final String name;//SqlCommandType枚举类型:UNKNOWN、INSERT、UPDATE、DELETE、SELECT、FLUSH private final SqlCommandType type; public SqlCommand(Configuration configuration, Class<?> mapperInterface, Method method) {//初步是先获取方法名称 final String methodName = method.getName();//方法的Method.clazz属性,主要用来判断是否为父接口信息 final Class<?> declaringClass = method.getDeclaringClass(); MappedStatement ms = resolveMappedStatement(mapperInterface, methodName, declaringClass, configuration);//检查MappedStatement是否创建成功 if (ms == null) {//处理@Flush注解 if (method.getAnnotation(Flush.class) != null) { name = null; type = SqlCommandType.FLUSH; } else { throw new BindingException(\"Invalid bound statement (not found): \" + mapperInterface.getName() + \".\" + methodName); } } else {//初始化name和type name = ms.getId(); type = ms.getSqlCommandType(); if (type == SqlCommandType.UNKNOWN) { throw new BindingException(\"Unknown execution method for: \" + name); } } }//解析创建MappedStatement(SQL语句全部具体信息),后面再重点介绍 private MappedStatement resolveMappedStatement(Class<?> mapperInterface, String methodName, Class<?> declaringClass, Configuration configuration) {//SQL语句的名称是由Mapper接口的名称与对应的方法名称组成的 String statementId = mapperInterface.getName() + \".\" + methodName;//先从Configuration.mappedStatements集合查询缓存 if (configuration.hasStatement(statementId)) { return configuration.getMappedStatement(statementId); } else if (mapperInterface.equals(declaringClass)) {//如果没有父类接口,也没有该sql、方法记录,那就没有了 return null; }//MappperMethod在父接口里面 for (Class<?> superInterface : mapperInterface.getInterfaces()) 
{//递归查找父接口类型是declaringClass的MappedStatement if (declaringClass.isAssignableFrom(superInterface)) { MappedStatement ms = resolveMappedStatement(superInterface, methodName, declaringClass, configuration); if (ms != null) { return ms; } } } return null; }//...先省略具体方法后面再分析} MapperMethod另一个比较重要的内部类MethodSignature,通过ParamNameResolver处理接口方法的参数列表。ParamNameResolver的name字段(SortedMap<Integer,String>类型),其中key表示参数在参数列表中的索引位置,value 表示参数名称,参数名称可以通过@Param注解指定,如果没有指定@Param注解,则使用参数索引作为其名称。如果参数列表中包含RowBounds或ResultHandler类型的参数,则这两种类型的参数并不会被记录到name集合中,这就会导致参数的索引与名称不一致。下面源码导读中讲解到例子: 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798package org.apache.ibatis.reflection;import ...public class ParamNameResolver {//下标值的前缀名称 private static final String GENERIC_NAME_PREFIX = \"param\"; /** * <p>key为index索引(区别于真实索引,换而言之为按照真正的参数args下标来) * value为参数变量名(通过) * The key is the index and the value is the name of the parameter.<br /> * The name is obtained from {@link Param} if specified. When {@link Param} is not specified, * the parameter index is used. Note that this index could be different from the actual index * when the method has special parameters (i.e. {@link RowBounds} or {@link ResultHandler}). 
* </p> * <ul>官方的举例: * <li>aMethod(@Param(\"M\") int a, @Param(\"N\") int b) -&gt; {{0, \"M\"}, {1, \"N\"}}</li> * <li>aMethod(int a, int b) -&gt; {{0, \"0\"}, {1, \"1\"}}</li> * <li>aMethod(int a, **RowBounds rb**, int b) -&gt; {{0, \"0\"}, {2, \"1\"}}</li> * </ul> */ private final SortedMap<Integer, String> names;//标志是否使用了**@Param注解** private boolean hasParamAnnotation; public ParamNameResolver(Configuration config, Method method) {//通过反射获取参数类型数组 final Class<?>[] paramTypes = method.getParameterTypes();//获取方法上的二维数组注解 final Annotation[][] paramAnnotations = method.getParameterAnnotations();//这个map会在最后转化成不可变容器集合unmodifiableSortedMap final SortedMap<Integer, String> map = new TreeMap<>(); int paramCount = paramAnnotations.length; //遍历注解 get names from @Param annotations for (int paramIndex = 0; paramIndex < paramCount; paramIndex++) {//判断特殊类型RowBounds或者ResultHandler,发现则直接跳过本次遍历 if (isSpecialParameter(paramTypes[paramIndex])) { // skip special parameters continue; } String name = null; for (Annotation annotation : paramAnnotations[paramIndex]) { if (annotation instanceof Param) {//只要出现Param注解一次立刻设置标志hasParamAnnotation,结束遍历返回指定的name hasParamAnnotation = true; name = ((Param) annotation).value(); break; } }//判断是否有特殊name if (name == null) { // @Param没有指定,根据Configuration.useActualParamName(默认true)是否使用实际名称 if (config.isUseActualParamName()) {//getActualParamName在下面解析,ParamNameUtil.getParamNames(method).get(paramIndex); name = getActualParamName(method, paramIndex); } if (name == null) { // 使用索引值use the parameter index as the name (\"0\", \"1\", ...) 
name = String.valueOf(map.size()); } }//记录到map集合 map.put(paramIndex, name); }//初始化成不可变集合 names = Collections.unmodifiableSortedMap(map); }//判断是否是RowBounds和ResultHandler两种类型的参数 private static boolean isSpecialParameter(Class<?> clazz) { return RowBounds.class.isAssignableFrom(clazz) || ResultHandler.class.isAssignableFrom(clazz); }...}package org.apache.ibatis.reflection;import ...//获取参数名称的工具类public class ParamNameUtil {//Method和Constructor的父类都是Executable public static List<String> getParamNames(Method method) { return getParameterNames(method); } public static List<String> getParamNames(Constructor<?> constructor) { return getParameterNames(constructor); } private static List<String> getParameterNames(Executable executable) {//通过父类Executable获取参数列表的名字集合 return Arrays.stream(executable.getParameters()).map(Parameter::getName).collect(Collectors.toList()); } private ParamNameUtil() { super(); }} 解析完ParamNameResolver的功能,回到MethodSignature继续研究。MethodSignature也是MapperMethod中定义的内部类,其中封装了Mapper接口中定义的方法的相关信息: 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647public static class MethodSignature {//...省略字段下面介绍 public MethodSignature(Configuration configuration, Class<?> mapperInterface, Method method) { //解析方法类型信息,具体在之前反射工具箱一章提及 Type resolvedReturnType = TypeParameterResolver.resolveReturnType(method, mapperInterface); if (resolvedReturnType instanceof Class<?>) { this.returnType = (Class<?>) resolvedReturnType; } else if (resolvedReturnType instanceof ParameterizedType) { this.returnType = (Class<?>) ((ParameterizedType) resolvedReturnType).getRawType(); } else { this.returnType = method.getReturnType(); }//返回值类型是否为void this.returnsVoid = void.class.equals(this.returnType);//返回值类型是否为Collection或者数组 this.returnsMany = configuration.getObjectFactory().isCollection(this.returnType) || this.returnType.isArray();//返回值是否为Cursor类型 this.returnsCursor = Cursor.class.equals(this.returnType);//是否Optional值容器 this.returnsOptional = 
Optional.class.equals(this.returnType);//若Methodsignature对应方法的返回值是Map且指定了@MapKey 注解,则使用getMapKey()方法处理 this.mapKey = getMapKey(method); this.returnsMap = this.mapKey != null;//rowBoundsIndex(RowBounds参数位置)和 resultHandlerIndex(ResultHandler参数位置)字段 this.rowBoundsIndex = getUniqueParamIndex(method, RowBounds.class); this.resultHandlerIndex = getUniqueParamIndex(method, ResultHandler.class);//创建ParamNameResolver对象,后面会重点介绍getNamedParams方法 this.paramNameResolver = new ParamNameResolver(configuration, method); }//getUniqueParamIndex主要功能是查找指定参数类型在方法参数的索引位置 private Integer getUniqueParamIndex(Method method, Class<?> paramType) { Integer index = null; final Class<?>[] argTypes = method.getParameterTypes(); for (int i = 0; i < argTypes.length; i++) { if (paramType.isAssignableFrom(argTypes[i])) { if (index == null) { index = i; } else {//RowBounds和ResultHandler类型的参数只能有一个,不能重复出现 throw new BindingException(method.getName() + \" cannot have multiple \" + paramType.getSimpleName() + \" parameters\"); } } } return index; }} 与语法有重要关系过程就在这里,ParamNameResolver有一个非常重要的方法getNamedParams,负责将args[]数组(用户传入的实参列表)转换成SQL语句对应的参数列表。 123456789101112131415161718192021222324252627282930313233343536/** * <p> * A single non-special parameter is returned without a name. * Multiple parameters are named using the naming rule. * In addition to the default names, this method also adds the generic names (param1, param2, * ...). 
* </p> */ public Object getNamedParams(Object[] args) { final int paramCount = names.size();//没有任何参数 if (args == null || paramCount == 0) { return null;//如果没有指定@Param或者参数只有一个,直接返回第一个参数 } else if (!hasParamAnnotation && paramCount == 1) { return args[names.firstKey()]; } else { final Map<String, Object> param = new ParamMap<>(); int i = 0; for (Map.Entry<Integer, String> entry : names.entrySet()) {//将names属性SortedMap<Integer,String>类型反转//names的value(参数名)为param新集合的key,names的key(参数索引)为param新集合的value param.put(entry.getValue(), args[entry.getKey()]); //参数索引添加前缀、并且从i + 1(1、2、3开始)//例子:add generic param names (param1, param2, ...) final String genericParamName = GENERIC_NAME_PREFIX + String.valueOf(i + 1); // ensure not to overwrite parameter named with @Param//确保不会覆盖@Param指定的特殊参数名,换而言之就是通过param1或者@Param都可以取到同样的参数 if (!names.containsValue(genericParamName)) { param.put(genericParamName, args[entry.getKey()]); } i++; } return param; } } 最后就是结果集处理,也是MapperMethod重要的方法execute,它会根据SQL语句的类型调用SqlSession对应的方法完成数据库操作。如果是是指定了ResultHandler,那就通过org.apache.ibatis.binding.MapperMethod#executeWithResultHandler处理结果集。 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145public Object execute(SqlSession sqlSession, Object[] args) { Object result; switch (command.getType()) {//method.convertArgsToSqlCommandParam会调用上面解释到的ParamNameResolver.getNamedParams//rowCountResult处理方法返回值影响行数rowCount,进行结果类型转换 case INSERT: { Object param = method.convertArgsToSqlCommandParam(args); result = rowCountResult(sqlSession.insert(command.getName(), param)); break; } case UPDATE: { Object param = method.convertArgsToSqlCommandParam(args); result = rowCountResult(sqlSession.update(command.getName(), param)); break; } case DELETE: { Object param 
= method.convertArgsToSqlCommandParam(args); result = rowCountResult(sqlSession.delete(command.getName(), param)); break; } case SELECT: if (method.returnsVoid() && method.hasResultHandler()) { executeWithResultHandler(sqlSession, args); result = null;//返回的是Collection接口实现类或者数组,由executeForMany方法处理 } else if (method.returnsMany()) { result = executeForMany(sqlSession, args);//返回的是Map集合结果,由executeForMap方法处理 } else if (method.returnsMap()) { result = executeForMap(sqlSession, args);//返回的是Cursor结果 } else if (method.returnsCursor()) { result = executeForCursor(sqlSession, args); } else {//最后查询结果只有一条 Object param = method.convertArgsToSqlCommandParam(args); result = sqlSession.selectOne(command.getName(), param);//如果是Optional包装返回值 if (method.returnsOptional() && (result == null || !method.getReturnType().equals(result.getClass()))) {//那就进行Optional包装 result = Optional.ofNullable(result); } } break; case FLUSH: result = sqlSession.flushStatements(); break; default: throw new BindingException(\"Unknown execution method for: \" + command.getName()); }//边界检查 if (result == null && method.getReturnType().isPrimitive() && !method.returnsVoid()) { throw new BindingException(\"Mapper method '\" + command.getName() + \" attempted to return null from a method with a primitive return type (\" + method.getReturnType() + \").\"); } return result; }private void executeWithResultHandler(SqlSession sqlSession, Object[] args) {//获取SOL语句对应的MappedStatement对象,MappedStatement中记录了SQL语句相关信息, MappedStatement ms = sqlSession.getConfiguration().getMappedStatement(command.getName());//StatementType不是存储过程(CALLABLE),并且没有指定ResultMap或ResultType,直接抛出BindingException if (!StatementType.CALLABLE.equals(ms.getStatementType()) && void.class.equals(ms.getResultMaps().get(0).getType())) { throw new BindingException(\"method \" + command.getName() + \" needs either a @ResultMap annotation, a @ResultType annotation,\" + \" or a resultType attribute in XML so a ResultHandler can be used as a parameter.\"); } 
Object param = method.convertArgsToSqlCommandParam(args);//该方法是否有RowBounds类型参数 if (method.hasRowBounds()) {//获取指定RowBounds类型的参数 RowBounds rowBounds = method.extractRowBounds(args);//通过sqlSession.select查询方法指定查询,并由指定的ResultHandler处理结果对象 sqlSession.select(command.getName(), param, rowBounds, method.extractResultHandler(args)); } else { sqlSession.select(command.getName(), param, method.extractResultHandler(args)); } }//Collection接口实现类或者数组处理过程private <E> Object executeForMany(SqlSession sqlSession, Object[] args) { List<E> result; Object param = method.convertArgsToSqlCommandParam(args);//参考上几句 if (method.hasRowBounds()) { RowBounds rowBounds = method.extractRowBounds(args); result = sqlSession.selectList(command.getName(), param, rowBounds); } else { result = sqlSession.selectList(command.getName(), param); }//将结果集转换为数组或Collection集合 issue #510 Collections & arrays support if (!method.getReturnType().isAssignableFrom(result.getClass())) { if (method.getReturnType().isArray()) { return convertToArray(result); } else {//根据Configuration对结果对象的转换类型 return convertToDeclaredCollection(sqlSession.getConfiguration(), result); } } return result; }private <E> Object convertToDeclaredCollection(Configuration config, List<E> list) {//使用前面介绍的ObjectFactory对象工厂,通过反射方式创建集合对象 Object collection = config.getObjectFactory().create(method.getReturnType());//创建collection集合反射对象MetaObject,实际上还是调用的CCollection接口的addAll方法 MetaObject metaObject = config.newMetaObject(collection); metaObject.addAll(list); return collection; } @SuppressWarnings(\"unchecked\") private <E> Object convertToArray(List<E> list) {//获取数组的元素类型 Class<?> arrayComponentType = method.getReturnType().getComponentType();//创建数组对象 Object array = Array.newInstance(arrayComponentType, list.size());//判断数组元素是否基本原始类型的 if (arrayComponentType.isPrimitive()) {//将list每一项数据都放置到数组中 for (int i = 0; i < list.size(); i++) { Array.set(array, i, list.get(i)); } return array; } else {//否直接转数组 return list.toArray((E[])array); } 
}//返回的是Map集合结果,由executeForMap方法处理,Cursor处理方法是sqlSession.selectCursorprivate <K, V> Map<K, V> executeForMap(SqlSession sqlSession, Object[] args) { Map<K, V> result;//转化成实参 Object param = method.convertArgsToSqlCommandParam(args); if (method.hasRowBounds()) { RowBounds rowBounds = method.extractRowBounds(args);//调用selectMap方法返回结果就是Map类型 result = sqlSession.selectMap(command.getName(), param, method.getMapKey(), rowBounds); } else { result = sqlSession.selectMap(command.getName(), param, method.getMapKey()); } return result; }","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"Mybatis技术内幕源码解析:Transaction事务","date":"2019-12-25T12:51:00.000Z","path":"2019/12/25/Mybatis技术内幕源码解析:Transaction事务/","text":"2.7、Transaction事务 控制数据库事务是业务型操作(CRUD)的基本功,Mybatis本身通过Transaction(org.apache.ibatis.transaction)接口对数据库进行了抽象化,具体分析如下: 123456789101112131415161718192021222324252627282930313233343536373839package org.apache.ibatis.transaction;import java.sql.Connection;import java.sql.SQLException;/**包装数据库连接关于创建、预处理、提交/回滚和关闭的生命周期 * Wraps a database connection. * Handles the connection lifecycle that comprises: * its creation, preparation, commit/rollback and close. */public interface Transaction { /**获取数据库连接对象 * Retrieve inner database connection. * @return DataBase connection */ Connection getConnection() throws SQLException; /**提交事务 * Commit inner database connection. */ void commit() throws SQLException; /**回滚事务 * Rollback inner database connection. */ void rollback() throws SQLException; /**关闭数据库连接 * Close inner database connection. */ void close() throws SQLException; /**获取事务的超时时间 * Get transaction timeout if set. 
*/ Integer getTimeout() throws SQLException;} Transaction 接口有JdbcTransaction、ManagedTransaction两个实现。ManagedTransaction的实现非常简单,它同样依赖其中的dataSource字段获取连接,但其commit、rollback方法都是空实现,事务的提交和回滚都是依靠容器管理的,关闭方法通过closeConnection字段的值控制数据库连接。JdbcTransaction依赖JDBC Connection控制数据库事务,接下就要进入对JdbcTransaction的分析: 12345678910111213141516171819202122232425package org.apache.ibatis.transaction.jdbc;import.../**下面操作方法都是直接使用java.sql.Connection直接进行操作事务,getTimeout是空实现 *如果autocommit已经开启,那么commit和rollback方法就会被忽略掉 * {@link Transaction} that makes use of the JDBC commit and rollback facilities directly. * It relies on the connection retrieved from the dataSource to manage the scope of the transaction. * Delays connection retrieval until getConnection() is called. * Ignores commit or rollback requests when autocommit is on. * */public class JdbcTransaction implements Transaction {//事务对应的数据库连接 protected Connection connection;//数据库连接对应的数据源 protected DataSource dataSource;//事务的隔离级别 protected TransactionIsolationLevel level;//是否自动提交 protected boolean autoCommit; ...省略下面方法和构造器} JdbcTransaction、ManagedTransaction两个实现,其对象分别由JdbcTransactionFactory 和Managed TransactionFactory负责创建。这里也使用了工厂方法模式。下面就分析工厂创建产品UML和接口规范: Transaction.png 123456789101112131415161718package org.apache.ibatis.transaction;import ...public interface TransactionFactory {//接口默认方法,通常在创建完Transaction后进行自定义配置事务 default void setProperties(Properties props) { // NOP }//在指定Connection上创建Transaction Transaction newTransaction(Connection conn);//从指定的DataSource获取数据库连接,并在此连接之上创建事务对象,后面就算配置TransactionIsolationLevel和autoCommit Transaction newTransaction(DataSource dataSource, TransactionIsolationLevel level, boolean autoCommit);} 在实践中,MyBatis通常会与Spring集成使用,数据库的事务是交给Spring进行管理的,以后会介绍Transaction接口的另一实现SpringManagedTransaction。","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"idea插件第二弹推荐","date":"2019-12-24T09:33:00.000Z","path":"2019/12/24/idea插件第二弹推荐/","text":"idea插件第二弹推荐工具收藏——idea推荐插件一、概念 
工欲善其事必先利其器,博主是个死忠工具派,为了解决一个大问题可能会收集多个工具和方案,然后求证对比出体验报告。后续文章有一大类就是工具类推荐,而本篇文章重点就是idea 安装插件记录,简要记录安装方法快速搭建个性化idea,还有一些关于UI方面插件可谓多不胜数,而且每个人口味不一,请各位自行选择——插件搜索技巧tags为Theme或者UI。 插件列表最强大插件卫冕之王——lambda表达式 名称 描述 JRebel 代替springboot dev热部署方案,最方便激活方式 Lombok 精简bean,各种功能强大又实用注解,搬砖人的MVP,结合Hutool实在完美 AceJump 光标跳跃,替代vim不二之选 CodeGlance 代码地图,方便查阅跳转 MavenHelper 快速分析maven 包冲突的问题,搜索包名 MyBatis Log Plugin Restore the mybatis generate sql to original whole sql.(拼接完整sql) Log Support 2 快速log.info(),结合Lombok插件注解@Slf4j可以说无敌 Grep Console 针对控制台日志不同等级进行染色高亮 MyBatisCodeHelperPro 对Mybatis支持非常强,请认真参考https://gejun123456.github.io/MyBatisCodeHelper-Pro/#/README,提高生产力的工具啊(收费可破解)! Free Mybatis plugin Mybaits支持跳转,有钱大爷请收费版Mybatis plugin强大破解较少,差评 Rainbow Brackets 彩虹括号,多层嵌套代码显示助手 String Manipulation 强大的字符串格式转化 GitToolBox git的强大助手,定时拉取代码、代码逐行展示日志(各种辅助金手指) RestfulToolkit 辅助通过URL定位Controller,简洁版的PostMan Alibaba Cloud Toolkit 结合阿里云(非阿里也支持),多节点发布工具加强力linux客户端 stackoverflow stackoverflow快速搜索bug插件 Translation 最强大的翻译插件,支持中文替换英文,解决起英文变量名难的重度患者 Key Promoter X 所有操作的快捷键提示,忘记鼠标真的 Cyan Light Theme A light theme,偏青色对眼睛很柔和舒服,黑暗主题实在不适应 Hiberbee theme 黑暗主题的色彩分明版本 反馈与建议 2012年java程序员可以说非常吃香,今年2019从业人数暴增,职业发展挑战变得越来越大!现在流行自动构建和自动部署CI,开发运维一体化docker,整个互联网都在追求敏捷开发的今天。掌握一款追求效率功能的IDE非常重要,很多群和公众号对ide和Eclipse争议很大。但请记住斯大林名言——落后就要挨打,ide本身代表高效,但是插件也别装太多,免得启动还要半天哈哈哈😀 (首推)慕课网免费教程:IntelliJ IDEA神器使用技巧 (推荐)尚硅谷IDEA视频教程:链接:https://pan.baidu.com/s/11biVBv9EI9yfL6Cee0r0LQ,密码:n7hn 看完上面两个教程,你会怀疑自己用的idea是假的,原来写代码还可以这样的。 邮箱:caochikai@qq.com","tags":[{"name":"tool","slug":"tool","permalink":"https://caochikai.github.io/tags/tool/"},{"name":"plugin","slug":"plugin","permalink":"https://caochikai.github.io/tags/plugin/"}]},{"title":"Mybatis技术内幕源码解析:DataSource数据源","date":"2019-12-23T10:20:00.000Z","path":"2019/12/23/Mybatis技术内幕源码解析:DataSource数据源/","text":"2.6、DataSource数据源 数据源组件都要实现javax.sql.DataSource接口,常见的有Druid、HikariCP和C3PO。Mybatis提供两个实现:PooledDataSource和UnpooledDataSource,通过工厂模式创建。 DataSourceFactory 123456789101112131415package 
org.apache.ibatis.datasource;import java.util.Properties;import javax.sql.DataSource;/** * @author Clinton Begin */public interface DataSourceFactory { //通过Properties 配置数据源 void setProperties(Properties props); //工厂获取数据源实例接口 DataSource getDataSource();} 2.6.2 DataSourceFactory 数据源工厂接口的实现类是DataSourceFactory,UnpooledDataSourceFactory和PooledDataSource是具体产品实现类,JndiDataSourceFactory是依赖JNDI服务从容器中获取用户配置的DataSource,我们以UnpooledDataSourceFactory举例如下: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172package org.apache.ibatis.datasource.unpooled;import java.util.Properties;import javax.sql.DataSource;import org.apache.ibatis.datasource.DataSourceException;import org.apache.ibatis.datasource.DataSourceFactory;import org.apache.ibatis.reflection.MetaObject;import org.apache.ibatis.reflection.SystemMetaObject;/** * @author Clinton Begin */public class UnpooledDataSourceFactory implements DataSourceFactory { //属性前缀为driver. 
private static final String DRIVER_PROPERTY_PREFIX = \"driver.\"; //driver.文字长度 private static final int DRIVER_PROPERTY_PREFIX_LENGTH = DRIVER_PROPERTY_PREFIX.length(); //数据源属性 protected DataSource dataSource; public UnpooledDataSourceFactory() { this.dataSource = new UnpooledDataSource(); } @Override public void setProperties(Properties properties) { Properties driverProperties = new Properties(); //根据dataSource创建MetaObject (反射获取该对象元数据) MetaObject metaDataSource = SystemMetaObject.forObject(dataSource); for (Object key : properties.keySet()) { String propertyName = (String) key; //检查属性名称是否以driver.开头 if (propertyName.startsWith(DRIVER_PROPERTY_PREFIX)) { String value = properties.getProperty(propertyName); //截取driver.后记录到driverProperties成员变量 driverProperties.setProperty(propertyName.substring(DRIVER_PROPERTY_PREFIX_LENGTH), value); } else if (metaDataSource.hasSetter(propertyName)) { //判断是否有该对象set对应的属性名 String value = (String) properties.get(propertyName); //根据属性类型进行转化 Object convertedValue = convertValue(metaDataSource, propertyName, value); metaDataSource.setValue(propertyName, convertedValue); } else { throw new DataSourceException(\"Unknown DataSource property: \" + propertyName); } } if (driverProperties.size() > 0) { metaDataSource.setValue(\"driverProperties\", driverProperties); } } @Override public DataSource getDataSource() { return dataSource; }//支持属性类型进行类型转换,主要是Integer、Long、Boolean三种类型的转换 private Object convertValue(MetaObject metaDataSource, String propertyName, String value) { Object convertedValue = value; Class<?> targetType = metaDataSource.getSetterType(propertyName); if (targetType == Integer.class || targetType == int.class) { convertedValue = Integer.valueOf(value); } else if (targetType == Long.class || targetType == long.class) { convertedValue = Long.valueOf(value); } else if (targetType == Boolean.class || targetType == boolean.class) { convertedValue = Boolean.valueOf(value); } return convertedValue; }} 2.6.3 UnpooledDataSource 
javax.sql.DataSource接口在数据源模块中扮演了产品接口的角色,MyBatis提供了两个DataSource 接口的实现类,分别是UnpooledDataSource和PooledDataSource,它们扮演着具体产品类的角色。 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768package org.apache.ibatis.datasource.unpooled;import ...public class UnpooledDataSource implements DataSource {//数据源类加载器 private ClassLoader driverClassLoader;//配置属性对象 private Properties driverProperties;//缓存注册过的数据源驱动 private static Map<String, Driver> registeredDrivers = new ConcurrentHashMap<>();//驱动 private String driver;//数据库URL private String url;//用户名和密码 private String username; private String password;//是否自动提交 private Boolean autoCommit;//默认事务隔离等级 **private Integer defaultTransactionIsolationLevel;//网络超时时间 private Integer defaultNetworkTimeout; static { Enumeration<Driver> drivers = DriverManager.getDrivers(); while (drivers.hasMoreElements()) { Driver driver = drivers.nextElement();//向DriverManager添加JDBC驱动 registeredDrivers.put(driver.getClass().getName(), driver); } }... private Connection doGetConnection(Properties properties) throws SQLException { //初始化驱动Driver对象 initializeDriver(); //创建真正的数据库链接对象 Connection connection = DriverManager.getConnection(url, properties); //配置autoCommit、defaultTransactionIsolationLevel和defaultNetworkTimeout configureConnection(connection); return connection; } private synchronized void initializeDriver() throws SQLException {//判断注册过,避免重复 if (!registeredDrivers.containsKey(driver)) { Class<?> driverType; try {//通过对应的类加载器获得对应的driverType if (driverClassLoader != null) { driverType = Class.forName(driver, true, driverClassLoader); } else { driverType = Resources.classForName(driver); } // DriverManager requires the driver to be loaded via the system ClassLoader. 
// http://www.kfu.com/~nsayer/Java/dyn-jdbc.html//真正创建Driver过程 Driver driverInstance = (Driver)driverType.getDeclaredConstructor().newInstance();//DriverProxy是UnpooledDataSource的内部类,是Driver的静态代理类 DriverManager.registerDriver(new DriverProxy(driverInstance)); registeredDrivers.put(driver, driverInstance); } catch (Exception e) { throw new SQLException(\"Error setting driver on UnpooledDataSource. Cause: \" + e); } } }} 2.6.4 PooledDataSource PooledDataSource支持数据库连接池的数据源,依赖UnpooledDataSource创建数据库连接。而且PooledDataSource 并不会直接管理java.sql.Connection对象,而是管理PooledConnection对象。在PooledConnection中封装了真正的数据库连接对象(java.sql.Connection)以及其代理对象,这里的代理对象是通过JDK动态代理产生的。 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162package org.apache.ibatis.datasource.pooled;import .../** * @author Clinton Begin */class PooledConnection implements InvocationHandler {//关闭的方法名称,并不是真正关闭而是返还到池子 private static final String CLOSE = \"close\";//动态代理的类Class private static final Class<?>[] IFACES = new Class<?>[] { Connection.class };//connection的哈希值 private final int hashCode; private final PooledDataSource dataSource;//真正的数据库连接 private final Connection realConnection;//数据库连接的代理对象 private final Connection proxyConnection;//取出的时间戳 private long checkoutTimestamp;//创建的时间戳 private long createdTimestamp;//最近一次使用的时间戳 private long lastUsedTimestamp;//由数据库URL、用户名和密码计算出来的hash值,可用于标识该连接所在的连接池 private int connectionTypeCode;//检查PooledConnection是否有效,防止归还依然使用该连接操作数据库 private boolean valid;...省略一大堆geter/seter方法 /** * Required for InvocationHandler implementation. 
* * @param proxy - not used * @param method - the method to be executed * @param args - the parameters to be passed to the method * @see java.lang.reflect.InvocationHandler#invoke(Object, java.lang.reflect.Method, Object[]) */ @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { String methodName = method.getName();//如果是CLOSE方法就向dataSource归还连接池,而不是直接关闭 if (CLOSE.hashCode() == methodName.hashCode() && CLOSE.equals(methodName)) { dataSource.pushConnection(this); return null; } try { if (!Object.class.equals(method.getDeclaringClass())) { // issue #579 toString() should never fail // throw an SQLException instead of a Runtime//检查valid是否有效,无效直接抛出异常SQLException checkConnection(); }//调用真正的数据库对象的方法 return method.invoke(realConnection, args); } catch (Throwable t) { throw ExceptionUtil.unwrapThrowable(t); } }} PoolState 管理PooledConnection对象状态的组件,它通过两个ArrayList集合分别管理空闲状态的连接和活跃状态的连接,定义如下: 123456789101112131415161718192021222324252627282930package org.apache.ibatis.datasource.pooled;import ...public class PoolState { protected PooledDataSource dataSource;//空闲状态的连接 protected final List<PooledConnection> idleConnections = new ArrayList<>();//**活跃状态**的连接 protected final List<PooledConnection> activeConnections = new ArrayList<>();//以下就是一些统计字段//请求数据库次数 protected long requestCount = 0;//获取连接累计的时间 protected long accumulatedRequestTime = 0;//checkoutTime表示应用从连接池中取出连接,到归还连接这段时长,//accumulatedCheckoutTime记录了所有连接累积的checkoutTime时长 protected long accumulatedCheckoutTime = 0;//累计超时连接个数 protected long claimedOverdueConnectionCount = 0;//累计超时时间 protected long accumulatedCheckoutTimeOfOverdueConnections = 0;//累计等待时间 protected long accumulatedWaitTime = 0;//等待次数 protected long hadToWaitCount = 0;、//无效的连接数 protected long badConnectionCount = 0;...} PooledDataSource中管理的真正的数据库连接对象是由PooledDataSource中封装的UnpooledDataSource对象创建的,并由PoolState管理所有连接的状态。PooledDataSource中核心字段如下: 123456789101112131415161718192021222324252627282930313233package 
org.apache.ibatis.datasource.pooled;import .../** * This is a simple, synchronous, thread-safe database connection pool. */public class PooledDataSource implements DataSource {//管理连接池状态以及统计信息 private final PoolState state = new PoolState(this);//UnpooledDataSource对象,用于生成真实的数据库连接对象,构造函数中会初始化该字段 private final UnpooledDataSource dataSource;//默认的最大活跃连接数量 // OPTIONAL CONFIGURATION FIELDS protected int poolMaximumActiveConnections = 10;、//最大空闲连接数量 protected int poolMaximumIdleConnections = 5;//最长的checkout等待时间 protected int poolMaximumCheckoutTime = 20000;//在无法获取连接时候,线程的等待时长 protected int poolTimeToWait = 20000;//最大容忍的本地无效连接数量,如果大于(最大活跃连接数量 + 最大空闲连接数量),直接抛出异常 protected int poolMaximumLocalBadConnectionTolerance = 3;//检查数据库是否SQL语句 protected String poolPingQuery = \"NO PING QUERY SET\";//是否允许发送上面的poolPingQuery 测试SQL语句 protected boolean poolPingEnabled;//当连接超过poo1PingconnectionsNotUsedFor毫秒未使用时,会发送一次测试sQL语句,检测连接是否正常 protected int poolPingConnectionsNotUsedFor;//根据数据库的URL、用户名和密码生成的一个hash值,该哈希值用于标志着当前的连接池 private int expectedConnectionTypeCode;...} PooledDataSource.getConnection()方法首先会调用PooledDataSource.popConnection()方法获取PooledConnection对象,然后通过PooledConnection.getProxyConnection()方法获取数据库连接的代理对象。 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105private PooledConnection popConnection(String username, String password) throws SQLException { boolean countedWait = false; PooledConnection conn = null; long t = System.currentTimeMillis(); int localBadConnectionCount = 0; while (conn == null) { synchronized (state) {//同步锁//检测是否有空闲连接 if (!state.idleConnections.isEmpty()) { // Pool has available connection//获取空闲连接 conn = state.idleConnections.remove(0); if (log.isDebugEnabled()) { log.debug(\"Checked out connection \" + conn.getRealHashCode() + \" from pool.\"); } } else {//没有空闲的 Pool does not have available 
connection//判断当前活跃连接数量是否已经超过的最大活跃数量限制 if (state.activeConnections.size() < poolMaximumActiveConnections) { // 没有就创建一个并封装成PooledConnection conn = new PooledConnection(dataSource.getConnection(), this); if (log.isDebugEnabled()) { log.debug(\"Created connection \" + conn.getRealHashCode() + \".\"); } } else {//超过了限制,获取最先创建的活跃连接 PooledConnection oldestActiveConnection = state.activeConnections.get(0);//获取其超时时间,判断是否已经超时 long longestCheckoutTime = oldestActiveConnection.getCheckoutTime(); if (longestCheckoutTime > poolMaximumCheckoutTime) {// 对超时连接进行统计 state.claimedOverdueConnectionCount++; state.accumulatedCheckoutTimeOfOverdueConnections += longestCheckoutTime; state.accumulatedCheckoutTime += longestCheckoutTime;//并移出活跃连接集合 state.activeConnections.remove(oldestActiveConnection);//如果连接超时且未提交,则自动回滚(省略try/catch代码块) if (!oldestActiveConnection.getRealConnection().getAutoCommit()) { oldestActiveConnection.getRealConnection().rollback(); }//创建新Pooledconnection对象,但是真正的数据库连接并未创建新的 conn = new PooledConnection(oldestActiveConnection.getRealConnection(), this); conn.setCreatedTimestamp(oldestActiveConnection.getCreatedTimestamp()); conn.setLastUsedTimestamp(oldestActiveConnection.getLastUsedTimestamp());//将超时连接设置成为无效状态 oldestActiveConnection.invalidate(); if (log.isDebugEnabled()) { log.debug(\"Claimed overdue connection \" + conn.getRealHashCode() + \".\"); } } else { //无空闲连接、无法创建新连接且无超时连接,则只能阻塞等待 try { if (!countedWait) { state.hadToWaitCount++;//统计等待信息 countedWait = true; } if (log.isDebugEnabled()) { log.debug(\"Waiting as long as \" + poolTimeToWait + \" milliseconds for connection.\"); } long wt = System.currentTimeMillis();//阻塞等待poolTimeToWait,下面就统计一下等待信息 state.wait(poolTimeToWait); state.accumulatedWaitTime += System.currentTimeMillis() - wt; } catch (InterruptedException e) { break; } } } } if (conn != null) { // ping to server and check the connection is valid or not if (conn.isValid()) { if (!conn.getRealConnection().getAutoCommit()) { 
conn.getRealConnection().rollback(); }//配置PooledConnection的属性,connectionTypeCode由url + username + password组成后的hashCode conn.setConnectionTypeCode(assembleConnectionTypeCode(dataSource.getUrl(), username, password));//记录checkout时间 conn.setCheckoutTimestamp(System.currentTimeMillis());//最近一次使用的时间 conn.setLastUsedTimestamp(System.currentTimeMillis());//进行相关统计 state.activeConnections.add(conn); state.requestCount++; state.accumulatedRequestTime += System.currentTimeMillis() - t; } else { if (log.isDebugEnabled()) { log.debug(\"A bad connection (\" + conn.getRealHashCode() + \") was returned from the pool, getting another connection.\"); }//无效连接数量统计 state.badConnectionCount++; localBadConnectionCount++; conn = null; if (localBadConnectionCount > (poolMaximumIdleConnections + poolMaximumLocalBadConnectionTolerance)) { if (log.isDebugEnabled()) { log.debug(\"PooledDataSource: Could not get a good connection to the database.\"); } throw new SQLException(\"PooledDataSource: Could not get a good connection to the database.\"); } } } } } 看名字就知道(Pool)是支持连接池,那通过前面对PooledConnection.invoke方法的分析我们知道,当调用连接的代理对象的close方法时,并未关闭真正的数据连接,而是代理调用PooledDataSource.pushConnection方法将PooledConnection 对象归还给连接池。下面就分析归还规程是如何实现的: 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849protected void pushConnection(PooledConnection conn) throws SQLException {//同步state synchronized (state) {//从activeConnections集合里面移除该对象 state.activeConnections.remove(conn);//检查连接对象是否有效 if (conn.isValid()) {//若当前空闲连接集合数量小于最大空闲阈值,并且该连接的connectionTypeCode和数据源本身的hashCode一致 if (state.idleConnections.size() < poolMaximumIdleConnections && conn.getConnectionTypeCode() == expectedConnectionTypeCode) {//累计checkout时长 state.accumulatedCheckoutTime += conn.getCheckoutTime(); if (!conn.getRealConnection().getAutoCommit()) { conn.getRealConnection().rollback(); }//为返还连接对象包装成PooledConnection PooledConnection newConn = new PooledConnection(conn.getRealConnection(), this); 
state.idleConnections.add(newConn);//添加到空闲集合idleConnections newConn.setCreatedTimestamp(conn.getCreatedTimestamp()); newConn.setLastUsedTimestamp(conn.getLastUsedTimestamp());//将原PooledConnection设置为无效 conn.invalidate(); if (log.isDebugEnabled()) { log.debug(\"Returned connection \" + newConn.getRealHashCode() + \" to pool.\"); }//唤醒通知其他等待阻塞的线程 state.notifyAll(); } else {//空闲连接到达上限或者hashCode不匹配(意味着不属于该连接池) state.accumulatedCheckoutTime += conn.getCheckoutTime(); if (!conn.getRealConnection().getAutoCommit()) { conn.getRealConnection().rollback(); }//关闭真正的数据库连接 conn.getRealConnection().close(); if (log.isDebugEnabled()) { log.debug(\"Closed connection \" + conn.getRealHashCode() + \".\"); }//设置成无效 conn.invalidate(); } } else { if (log.isDebugEnabled()) { log.debug(\"A bad connection (\" + conn.getRealHashCode() + \") attempted to return to the pool, discarding connection.\"); }//统计无效连接数量 state.badConnectionCount++; } } } 在上面代码分析当中,我们可以多次看见isValid()方法,除了校验PooledConnection.valid属性外,还会调用dataSource.pingConnection方法让数据库执行poolPingQuery的测试SQL数据库语句,下面让我讨论下关于校验连接有效的过程: 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364 /**检查连接有效性 * @return True if the connection is usable */ public boolean isValid() { return valid && realConnection != null && dataSource.pingConnection(this); }/**通过发送ping检查SQL判断当前数据库连接是否已经失效,毕竟数据库对长时间的链接也会做失效处理 * Method to check to see if a connection is still usable * * @param conn - the connection to check * @return True if the connection is still usable */ protected boolean pingConnection(PooledConnection conn) {//ping操作成功的标志 boolean result = true; try {//先检查数据库连接是否已经关闭 result = !conn.getRealConnection().isClosed(); } catch (SQLException e) { if (log.isDebugEnabled()) { log.debug(\"Connection \" + conn.getRealHashCode() + \" is BAD: \" + e.getMessage()); } result = false; }//1、如果没关闭进一步检查 if (result) {//2、检查SQL检查的心跳开关 if (poolPingEnabled) 
{//3、检查poolPingConnectionsNotUsedFor是否已经超过(当前时间距离最近一次使用的时间间距) if (poolPingConnectionsNotUsedFor >= 0 && conn.getTimeElapsedSinceLastUse() > poolPingConnectionsNotUsedFor) { try { if (log.isDebugEnabled()) { log.debug(\"Testing connection \" + conn.getRealHashCode() + \" ...\"); } Connection realConn = conn.getRealConnection();//下面是执行SQL测试语句 try (Statement statement = realConn.createStatement()) { statement.executeQuery(poolPingQuery).close(); } if (!realConn.getAutoCommit()) { realConn.rollback(); } result = true; if (log.isDebugEnabled()) { log.debug(\"Connection \" + conn.getRealHashCode() + \" is GOOD!\"); } } catch (Exception e) { log.warn(\"Execution of ping query '\" + poolPingQuery + \"' failed: \" + e.getMessage()); try { conn.getRealConnection().close(); } catch (Exception e2) { //ignore } result = false; if (log.isDebugEnabled()) { log.debug(\"Connection \" + conn.getRealHashCode() + \" is BAD: \" + e.getMessage()); } } } } } return result; }","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"设计模式原则","date":"2019-12-22T15:43:00.000Z","path":"2019/12/22/设计模式原则/","text":"幕布:软件设计原则 思维导图","tags":[{"name":"software engineering","slug":"software-engineering","permalink":"https://caochikai.github.io/tags/software-engineering/"}]},{"title":"Mybatis技术内幕源码解析:资源加载","date":"2019-12-22T06:10:00.000Z","path":"2019/12/22/Mybatis技术内幕源码解析:资源加载/","text":"2.5、资源加载2.5.1、类加载器 JVM类加载器(ClassLoader)负责加载各种资源(主要是class字节码文件),来源比如文件系统、网络资源或者其他来源,且默认使用的是双亲委派模式。类加载器基本三大特性为延迟加载、职责分明、传递性。而JVM 中内置了三个重要的 ClassLoader: BootstrapClassLoader、ExtensionClassLoader 和 AppClassLoader。URLClassLoader 不但可以加载远程类库,还可以加载本地路径的类库,取决于构造器中不同的地址形式。ExtensionClassLoader 和 AppClassLoader 都是 URLClassLoader 的子类,它们都是从本地文件系统里加载类库。 BootstrapClassLoader(根加载器):加载JVM核心类,比如$JAVA_HOME/lib/rt.jar; ExtensionClassLoader(扩展加载器):加载扩展类,以 javax 开头的swing 系列、内置的 js 引擎、xml 解析器等等; AppClassLoader(用户加载器):ClassLoader.getSystemClassLoader()获得,加载Classpath 环境变量下目录和jar; 
Thread.contextClassLoader(线程上下文类加载器):主要是类隔离或者共享; 双亲委派模式:简单了来说子类有个parent 属性指向它的父加载器(类似指针),先检查自己是否已经加载过了,如果没有加载过就优先让父类尝试加载(理解为很懒都坑爹),如果已经加载或者坑爹不成那就自己干。 自定义类加载器:继承java.lang.ClassLoader,比如Tomcat(WebAppClassLoader)、JBoss类加载器; 12345678910package java.lang;...public abstract class ClassLoader { // The parent class loader for delegation // Note: VM hardcoded the offset of this field, thus all new fields // must be added *after* it.//如果parent为空(比如ExtensionClassLoader),那默认null就是根加载器BootstrapClassLoader private final ClassLoader parent; ...} 2.5.2 ClassloaderWrapper 看名字就知道是ClassLoader的包装器,org.apache.ibatis.io包就封装了资源加载文件的相关API,通过ClassloaderWrapper包装器就可以调整多个ClassLoader的使用顺序。ClassLoaderWrapper的主要方法可以分为三类,分别是getResourceAsURL方法、classForName方法、getResourceAsStream方法。 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112package org.apache.ibatis.io;import java.io.InputStream;import java.net.URL;/** * A class to wrap access to multiple class loaders making them work as one * * @author Clinton Begin */public class ClassLoaderWrapper { //指定的默认类加载器 ClassLoader defaultClassLoader; //SecurityManager系统类加载器 ClassLoader systemClassLoader; ClassLoaderWrapper() { try { //初始化类加载器 systemClassLoader = ClassLoader.getSystemClassLoader(); } catch (SecurityException ignored) { // AccessControlException on Google App Engine } } ClassLoader[] getClassLoaders(ClassLoader classLoader) { return new ClassLoader[]{ classLoader,//参数指定加载器 defaultClassLoader,//默认加载器 Thread.currentThread().getContextClassLoader(),//线程上下文类加载器 getClass().getClassLoader(),//当前类所使用的类加载器 systemClassLoader};//System ClassLoader } ...省略部分重载方法 /** * Try to get a resource from a group of classloaders * * @param resource - the resource to get * @param classLoader - the classloaders to examine * @return the resource or null */ InputStream getResourceAsStream(String 
resource, ClassLoader[] classLoader) { for (ClassLoader cl : classLoader) { if (null != cl) { // try to find the resource as passed InputStream returnValue = cl.getResourceAsStream(resource); // now, some class loaders want this leading \"/\", so we'll add it and try again if we didn't find the resource if (null == returnValue) { returnValue = cl.getResourceAsStream(\"/\" + resource); } if (null != returnValue) { return returnValue; } } } return null; } /** * Get a resource as a URL using the current class path * * @param resource - the resource to locate * @param classLoader - the class loaders to examine * @return the resource or null */ URL getResourceAsURL(String resource, ClassLoader[] classLoader) { URL url; for (ClassLoader cl : classLoader) { if (null != cl) { // look for the resource as passed in... url = cl.getResource(resource); // ...but some class loaders want this leading \"/\", so we'll add it // and try again if we didn't find the resource if (null == url) { url = cl.getResource(\"/\" + resource); } // \"It's always in the last place I look for it!\" // ... because only an idiot would keep looking for it after finding it, so stop looking already. if (null != url) { return url; } } } // didn't find it anywhere. return null; } /** * Attempt to load a class from a group of classloaders * * @param name - the class to load * @param classLoader - the group of classloaders to examine * @return the class * @throws ClassNotFoundException - Remember the wisdom of Judge Smails: Well, the world needs ditch diggers, too. 
*/ Class<?> classForName(String name, ClassLoader[] classLoader) throws ClassNotFoundException { for (ClassLoader cl : classLoader) { if (null != cl) { try { Class<?> c = Class.forName(name, true, cl) if (null != c) { return c; } } catch (ClassNotFoundException e) { // we'll ignore this until all classloaders fail to locate the class } } } throw new ClassNotFoundException(\"Cannot find class: \" + name); }} 2.5.3 ResolverUtil ResolverUtil根据指定的条件查找指定包下的类,其中条件由Test接口中定义了matches方法提供。 upload successful 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112package org.apache.ibatis.io;import ***import org.apache.ibatis.logging.Log;import org.apache.ibatis.logging.LogFactory;public class ResolverUtil<T> { /** * A simple interface that specifies how to test classes to determine if they * are to be included in the results produced by the ResolverUtil. */ public interface Test { /** * Will be called repeatedly with candidate classes. Must return True if a class * is to be included in the results, false otherwise. */ boolean matches(Class<?> type); } /** * A Test that checks to see if each class is assignable to the provided class. Note * that this test will match the parent type itself if it is presented for matching. */ public static class IsA implements Test { private Class<?> parent; //...构造方法初始化parent /** Returns true if type is assignable to the parent type supplied in the constructor. */ @Override public boolean matches(Class<?> type) { //判断parent是否是type的父类(继承) return type != null && parent.isAssignableFrom(type); } } /** * A Test that checks to see if each class is annotated with a specific annotation. If it * is, then the test returns true, otherwise false. */ public static class AnnotatedWith implements Test { private Class<? 
extends Annotation> annotation; //...构造方法初始化annotation /** Returns true if the type is annotated with the class provided to the constructor. */ @Override public boolean matches(Class<?> type) { //判断Type类上是否添加了annotation注解 return type != null && type.isAnnotationPresent(annotation); } } /** The set of matches being accumulated. */ private Set<Class<? extends T>> matches = new HashSet<>(); /** * The ClassLoader to use when looking for classes. If null then the ClassLoader returned * by Thread.currentThread().getContextClassLoader() will be used. */ private ClassLoader classloader; /** * Provides access to the classes discovered so far. If no calls have been made to * any of the {@code find()} methods, **this set will be empty**. * * @return 匹配的类. */ public Set<Class<? extends T>> getClasses() { return matches; } /** * Returns the classloader that will be used for scanning for classes. If no explicit * ClassLoader has been set by the calling,the context class loader will be used. * 默认是线程上下文类加载器 */ public ClassLoader getClassLoader() { return classloader == null ? Thread.currentThread().getContextClassLoader() : classloader; } public void setClassLoader(ClassLoader classloader) { this.classloader = classloader; } /** * Scans for classes starting at the package provided and descending into subpackages. * Each class is offered up to the Test as it is discovered, and if the Test returns * true the class is retained. Accumulated classes can be fetched by calling * * @param test an instance of {@link Test} that will be used to filter classes * @param packageName the name of the package from which to start scanning for * classes, e.g. 
*/ public ResolverUtil<T> find(Test test, String packageName) { //根据包名获取对应的路径 String path = getPackagePath(packageName); try { //根据VFS结果集合查找上面path下的资源 List<String> children = VFS.getInstance().list(path); for (String child : children) { if (child.endsWith(\".class\")) { //检查class是否符合结果 addIfMatching(test, child); } } } catch (IOException ioe) { log.error(\"Could not read package: \" + packageName, ioe); } return this; } 12345678910111213141516171819202122232425262728293031323334 /** * Add the class designated by the fully qualified class name provided to the set of * resolved classes if and only if it is approved by the Test supplied. * * @param test the test used to determine if the class matches 查找条件 * @param fqn the fully qualified name of a class 类的完全限定名称 */ @SuppressWarnings(\"unchecked\") protected void addIfMatching(Test test, String fqn) { try { String externalName = fqn.substring(0, fqn.indexOf('.')).replace('/', '.'); ClassLoader loader = getClassLoader(); if (log.isDebugEnabled()) { log.debug(\"Checking to see if class \" + externalName + \" matches criteria [\" + test + \"]\"); } Class<?> type = loader.loadClass(externalName);//加载指定的类 if (test.matches(type)) { //如果条件过滤在添加到匹配结果集合matches matches.add((Class<T>) type); } } catch (Throwable t) { log.warn(\"Could not examine class '\" + fqn + \"'\" + \" due to a \" + t.getClass().getName() + \" with message: \" + t.getMessage()); } }}//使用例子:在pkg1和pkg2这两个包下查找实现了ActionBean这个类ResolverUtil<ActionBean> resolver = new ResolverUtil<ActionBean>();resolver.findImplementation(ActionBean.class, pkg1, pkg2);resolver.find(new CustomTest(), pkg1);resolver.find(new CustomTest(), pkg2);//获取上面三个方法三次查找的结果集Collection<ActionBean> beans = resolver.getClasses(); 2.5.5 VFS(虚拟文件系统) VFS是Virtual File System缩写,用来查找指定路径下的资源。VFS也是一个抽象类,Mybatis在org.apache.ibatis.io下有DefaultVFS和JBoss6VFS的实现,UML图如下: 
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485package org.apache.ibatis.io;import ...import org.apache.ibatis.logging.Log;import org.apache.ibatis.logging.LogFactory;/** * Provides a very simple API for accessing resources within an application server. * * @author Ben Gunter */public abstract class VFS { /** The built-in implementations.记录了两个实现类 */ public static final Class<?>[] IMPLEMENTATIONS = { JBoss6VFS.class, DefaultVFS.class }; /** The list to which implementations are added by {@link #addImplClass(Class)}. *用户自定义的VES实现类。addImplClass()方法会将指定的VES实现Class对象添加到集合。 */ public static final List<Class<? extends VFS>> USER_IMPLEMENTATIONS = new ArrayList<>(); ... /** Singleton instance holder.单例 */ private static class VFSHolder { static final VFS INSTANCE = createVFS(); @SuppressWarnings(\"unchecked\") static VFS createVFS() { // Try the user implementations first, then the built-ins List<Class<? extends VFS>> impls = new ArrayList<>();//优先使用用户定义的VFS的实现类 impls.addAll(USER_IMPLEMENTATIONS);//随后使用Mybatis提供的JBoss6VFS.class, DefaultVFS.class impls.addAll(Arrays.asList((Class<? extends VFS>[]) IMPLEMENTATIONS));//遍历所有实现类,依次实例化VFS对象并判断是否可用,可用就返回对象结束循环 // Try each implementation class until a valid one is found VFS vfs = null; for (int i = 0; vfs == null || !vfs.isValid(); i++) { Class<? 
extends VFS> impl = impls.get(i); try { vfs = impl.getDeclaredConstructor().newInstance(); if (!vfs.isValid()) { if (log.isDebugEnabled()) { log.debug(\"VFS implementation \" + impl.getName() + \" is not valid in this environment.\"); } } } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) { log.error(\"Failed to instantiate \" + impl, e); return null; } } if (log.isDebugEnabled()) { log.debug(\"Using VFS adapter \" + vfs.getClass().getName()); } return vfs; } } /**单例模式体现,通过静态类创建 * Get the singleton {@link VFS} instance. If no {@link VFS} implementation can be found for the * current environment, then this method returns null. */ public static VFS getInstance() { return VFSHolder.INSTANCE; }//抽象方法 /*isValid()负责检测当前VFS对象在当前环境下是否有效* Return true if the {@link VFS} implementation is valid for the current environment. */ public abstract boolean isValid(); /**负责查找指定的资源名称列表 * Recursively list the full resource path of all the resources that are children of the * resource identified by a URL. * * @param url The URL that identifies the resource to list. * @param forPath The path to the resource that is identified by the URL. Generally, this is the * value passed to {@link #getResources(String)} to get the resource URL. * @return A list containing the names of the child resources. 
* @throws IOException If I/O errors occur */ protected abstract List<String> list(URL url, String forPath) throws IOException;} VFS中定义了list(URL,String)和isValid()两个抽象方法,在ResolverUtil.find()方法查找类文件时会调用list()方法的重载方法,该重载最终会调用list(URL,String)这个重载。我们以DefaultVFS为例进行分析,实现如下: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173ackage org.apache.ibatis.io;import ***import org.apache.ibatis.logging.Log;import org.apache.ibatis.logging.LogFactory;/**VFS的默认实现方法,适用于大多数的应用服务 * A default implementation of {@link VFS} that works for most application servers. * * @author Ben Gunter */public class DefaultVFS extends VFS { private static final Log log = LogFactory.getLog(DefaultVFS.class); //判断是否是jar文件的字节特征 /** The magic header that indicates a JAR (ZIP) file. */ private static final byte[] JAR_MAGIC = { 'P', 'K', 3, 4 }; @Override public boolean isValid() { return true; } @Override public List<String> list(URL url, String path) throws IOException { InputStream is = null; try { List<String> resources = new ArrayList<>(); // First, try to find the URL of a JAR file containing the requested resource. If a JAR // file is found, then we'll list child resources by reading the JAR. 
//尝试读取jar文件,返回对应的URL,如果为空那就代表不是jar资源 URL jarUrl = findJarForResource(url); if (jarUrl != null) { is = jarUrl.openStream(); if (log.isDebugEnabled()) { log.debug(\"Listing \" + url); } //遍历jar包中以path开头的资源列表 resources = listResources(new JarInputStream(is), path); } else { //遍历url的子目录并记录到children List<String> children = new ArrayList<>(); try { if (isJar(url)) { // Some versions of JBoss VFS might give a JAR stream even if the resource // referenced by the URL isn't actually a JAR is = url.openStream(); try (JarInputStream jarInput = new JarInputStream(is)) { if (log.isDebugEnabled()) { log.debug(\"Listing \" + url); } for (JarEntry entry; (entry = jarInput.getNextJarEntry()) != null; ) { if (log.isDebugEnabled()) { log.debug(\"Jar entry: \" + entry.getName()); } children.add(entry.getName()); } } } else { /* 大意是部分应用是资源列表在文本文件的里,下面尝试逐行读取资源, *路径为path + \"/\" + line(每行内容)若读取成功为文件,否则为文件夹 * Some servlet containers allow reading from directory resources like a * text file, listing the child resources one per line. However, there is no * way to differentiate between directory and file resources just by reading * them. To work around that, as each line is read, try to look it up via * the class loader as a child of the current resource. If any line fails * then we assume the current resource is not a directory. */ is = url.openStream(); List<String> lines = new ArrayList<>(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) { for (String line; (line = reader.readLine()) != null;) { if (log.isDebugEnabled()) { log.debug(\"Reader entry: \" + line); } lines.add(line); if (getResources(path + \"/\" + line).isEmpty()) { lines.clear(); break; } } } if (!lines.isEmpty()) { if (log.isDebugEnabled()) { log.debug(\"Listing \" + url); } children.addAll(lines); } } } catch (FileNotFoundException e) { /*若读取URL失败则直接整个文件夹下所有的文件 * For file URLs the openStream() call might fail, depending on the servlet * container, because directories can't be opened for reading. 
If that happens, * then list the directory directly instead. */ if (\"file\".equals(url.getProtocol())) { File file = new File(url.getFile()); if (log.isDebugEnabled()) { log.debug(\"Listing directory \" + file.getAbsolutePath()); } if (file.isDirectory()) { if (log.isDebugEnabled()) { log.debug(\"Listing \" + url); } children = Arrays.asList(file.list()); } } else { // No idea where the exception came from so rethrow it throw e; } } // The URL prefix to use when recursively listing child resources String prefix = url.toExternalForm(); if (!prefix.endsWith(\"/\")) { prefix = prefix + \"/\"; } // Iterate over immediate children, adding files and recursing into directories for (String child : children) { String resourcePath = path + \"/\" + child; resources.add(resourcePath); URL childUrl = new URL(prefix + child); resources.addAll(list(childUrl, resourcePath)); } } return resources; } finally { ...关闭流 } } /** * List the names of the entries in the given {@link JarInputStream} that begin with the * specified {@code path}. Entries will match with or without a leading slash. 
* * @param jar The JAR input stream * @param path The leading path to match * @return The names of all the matching entries * @throws IOException If I/O errors occur */ protected List<String> listResources(JarInputStream jar, String path) throws IOException { // Include the leading and trailing slash when matching names //...如果path不是以/开始和结束,则在其开始和结束位置添加/(略) // Iterate over the entries and collect those that begin with the requested path List<String> resources = new ArrayList<>(); //遍历jar包,将以path开头资源加入到resources 集合 for (JarEntry entry; (entry = jar.getNextJarEntry()) != null;) { if (!entry.isDirectory()) { // Add leading slash if it's missing StringBuilder name = new StringBuilder(entry.getName()); if (name.charAt(0) != '/') { name.insert(0, '/'); } // Check file name->path开头 if (name.indexOf(path) == 0) { if (log.isDebugEnabled()) { log.debug(\"Found resource: \" + name); } // Trim leading slash resources.add(name.substring(1)); } } } return resources; } ...} 参考文章 Oracle Location-Independent Access to Resources 老大难的 Java ClassLoader 再不理解就老了","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"Mybatis技术内幕源码解析:日志模块","date":"2019-12-21T13:19:00.000Z","path":"2019/12/21/Mybatis技术内幕源码解析:日志模块/","text":"2.4、日志模块 Mybatis日志模块使用了适配器模式,内部调用org.apache.ibatis.logging.Log 接口。为了整合第三方的日志组件Log4J2、Log4J,mybatis提供了多种Adapter适配这些日志组件的API,并遵守Log 接口标准。在日志级别支持中,Mybatis提供了trace、debug、warn、eror四个级别,只能说基本满足绝大多数场景的日志。 12345678910111213141516171819202122package org.apache.ibatis.logging;/** * @author Clinton Begin */public interface Log { //is开头方法为判断登记方法 boolean isDebugEnabled(); boolean isTraceEnabled(); //下面四个分别支持四种等级日志调用 void error(String s, Throwable e); void error(String s); void debug(String s); void trace(String s); void warn(String s);} LogFactory负责创建对应的日志组件的适配器,其内部逻辑基本公共静态代码块加载支持的日志适配器,然后使用logConstructor集合记录所有支持的日志适配器。 
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869package org.apache.ibatis.logging;import java.lang.reflect.Constructor;/** * @author Clinton Begin * @author Eduardo Macarron */public final class LogFactory { /** * Marker to be used by logging implementations that support markers. */ public static final String MARKER = \"MYBATIS\"; //第三方日志组件的集合 private static Constructor<? extends Log> logConstructor; static { tryImplementation(LogFactory::useSlf4jLogging); tryImplementation(LogFactory::useCommonsLogging); tryImplementation(LogFactory::useLog4J2Logging); tryImplementation(LogFactory::useLog4JLogging); tryImplementation(LogFactory::useJdkLogging); tryImplementation(LogFactory::useNoLogging); } private LogFactory() { // disable construction } public static Log getLog(Class<?> aClass) { return getLog(aClass.getName()); } public static Log getLog(String logger) { try { //返回logger对象 return logConstructor.newInstance(logger); } catch (Throwable t) { throw new LogException(\"Error creating logger for logger \" + logger + \". Cause: \" + t, t); } } ...use开头方法省略都会走下面方法setImplementation(对应适配器类) private static void tryImplementation(Runnable runnable) { if (logConstructor == null) { try { runnable.run(); } catch (Throwable t) { // ignore } } } private static void setImplementation(Class<? extends Log> implClass) { try { //获取指定适配器的构造方法 Constructor<? extends Log> candidate = implClass.getConstructor(String.class); //实例化适配器 Log log = candidate.newInstance(LogFactory.class.getName()); if (log.isDebugEnabled()) { log.debug(\"Logging initialized using '\" + implClass + \"' adapter.\"); } //初始化logConstructor logConstructor = candidate; } catch (Throwable t) { throw new LogException(\"Error setting Log implementation. 
Cause: \" + t, t); } }} 2.4.4、JDBC调试日志 org.apache.ibatis.logging.jdbc是Mybatis通过动态代理的方式,将JDBC操作通过指定的日志框架打印出来,输出内容包含sql语句、绑定参数、影响行数等等。BaseJdbcLogger是jdbc下所有logger类的父类,继承树如下: upload successful 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394package org.apache.ibatis.logging.jdbc;import ...省略部分导入import org.apache.ibatis.logging.Log;import org.apache.ibatis.reflection.ArrayUtil;/** * Base class for proxies to do logging. * * @author Clinton Begin * @author Eduardo Macarron */public abstract class BaseJdbcLogger { //PreparedStatement接口中定义的常用的set*()方法名 protected static final Set<StrinSg> SET_METHODS; //Statement接口和PreparedStatement接口中与执行SQL语句相关的方法名 protected static final Set<String> EXECUTE_METHODS = new HashSet<>(); //PreparedStatement.set*()方法设置的键值对,key为parameterIndex下标,value为列值 private final Map<Object, Object> columnMap = new HashMap<>(); //parameterIndex集合 private final List<Object> columnNames = new ArrayList<>(); //value集合 private final List<Object> columnValues = new ArrayList<>(); //当前支持的适配器实例 protected final Log statementLog; //Sql层数,用于格式化sql,buffer堆栈的深度->char[] buffer = new char[queryStack * 2 + 2]; protected final int queryStack; /* * Default constructor,传入适配器实例 */ public BaseJdbcLogger(Log log, int queryStack) { this.statementLog = log; if (queryStack == 0) { this.queryStack = 1; } else { this.queryStack = queryStack; } } static { //lambada获取PreparedStatement开通setXXX方法名称集合 SET_METHODS = Arrays.stream(PreparedStatement.class.getDeclaredMethods()) .filter(method -> method.getName().startsWith(\"set\")) .filter(method -> method.getParameterCount() > 1) .map(Method::getName) .collect(Collectors.toSet()); //执行方法代理目标 EXECUTE_METHODS.add(\"execute\"); EXECUTE_METHODS.add(\"executeUpdate\"); EXECUTE_METHODS.add(\"executeQuery\"); EXECUTE_METHODS.add(\"addBatch\"); } static { //往SET_METHODS集合添加记录 SET_METHODS.add(\"setString\"); 
SET_METHODS.add(\"setNString\"); SET_METHODS.add(\"setInt\"); SET_METHODS.add(\"setByte\"); SET_METHODS.add(\"setShort\"); SET_METHODS.add(\"setLong\"); SET_METHODS.add(\"setDouble\"); SET_METHODS.add(\"setFloat\"); SET_METHODS.add(\"setTimestamp\"); SET_METHODS.add(\"setDate\"); SET_METHODS.add(\"setTime\"); SET_METHODS.add(\"setArray\"); SET_METHODS.add(\"setBigDecimal\"); SET_METHODS.add(\"setAsciiStream\"); SET_METHODS.add(\"setBinaryStream\"); SET_METHODS.add(\"setBlob\"); SET_METHODS.add(\"setBoolean\"); SET_METHODS.add(\"setBytes\"); SET_METHODS.add(\"setCharacterStream\"); SET_METHODS.add(\"setNCharacterStream\"); SET_METHODS.add(\"setClob\"); SET_METHODS.add(\"setNClob\"); SET_METHODS.add(\"setObject\"); SET_METHODS.add(\"setNull\"); EXECUTE_METHODS.add(\"execute\"); EXECUTE_METHODS.add(\"executeUpdate\"); EXECUTE_METHODS.add(\"executeQuery\"); EXECUTE_METHODS.add(\"addBatch\"); } protected void setColumn(Object key, Object value) { columnMap.put(key, value); columnNames.add(key); columnValues.add(value); } ...} PreparedStatementLogger继承了BaseJdbcLogger并实现了InvocationHandler接口。PreparedStatementLogger.invoke方法会为EXECUTE_METHODS集合中的方法、SET_METHODS集合中的方法、getResultSet等方法提供代理,具体代码如下: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100package org.apache.ibatis.logging.jdbc;import java.lang.reflect.InvocationHandler;import java.lang.reflect.Method;import java.lang.reflect.Proxy;import java.sql.CallableStatement;import java.sql.PreparedStatement;import java.sql.ResultSet;import org.apache.ibatis.logging.Log;import org.apache.ibatis.reflection.ExceptionUtil;/** * PreparedStatement proxy to add logging. 
* * @author Clinton Begin * @author Eduardo Macarron * */public final class PreparedStatementLogger extends BaseJdbcLogger implements InvocationHandler { private final PreparedStatement statement; private PreparedStatementLogger(PreparedStatement stmt, Log statementLog, int queryStack) { super(statementLog, queryStack); this.statement = stmt; } @Override public Object invoke(Object proxy, Method method, Object[] params) throws Throwable { try { if (Object.class.equals(method.getDeclaringClass())) { return method.invoke(this, params); } //调用了EXECUTE_METHODS集合中的方法 if (EXECUTE_METHODS.contains(method.getName())) { if (isDebugEnabled()) { //日志输出,输出的是参数值以及参数类型->() debug(\"Parameters: \" + getParameterValueString(), true); } clearColumnInfo();//清空BaseJdbcLogger中定义的三个column*集合 //如果是executeQuery方法,则为ResultSet创建代理对象,不是则直接返回结果 if (\"executeQuery\".equals(method.getName())) { ResultSet rs = (ResultSet) method.invoke(statement, params); return rs == null ? null : ResultSetLogger.newInstance(rs, statementLog, queryStack); } else { return method.invoke(statement, params); } } else if (SET_METHODS.contains(method.getName())) { //如果是SET_METHODS集合中的方法,则通过setColumn记录到 //BaseJdbcLogger的三个column*集合 if (\"setNull\".equals(method.getName())) { setColumn(params[0], null); } else { setColumn(params[0], params[1]); } return method.invoke(statement, params); } else if (\"getResultSet\".equals(method.getName())) { //如果调用getResultSet()方法,则为ResultSet创建代理对象 ResultSet rs = (ResultSet) method.invoke(statement, params); return rs == null ? null : ResultSetLogger.newInstance(rs, statementLog, queryStack); } else if (\"getUpdateCount\".equals(method.getName())) { int updateCount = (Integer) method.invoke(statement, params); if (updateCount != -1) { debug(\" Updates: \" + updateCount, false); } //返回影响条数 return updateCount; } else { return method.invoke(statement, params); } } catch (Throwable t) { throw ExceptionUtil.unwrapThrowable(t); } } /** * Creates a logging version of a PreparedStatement. 
* * @param stmt - the statement * @param statementLog - the statement log * @param queryStack - the query stack * @return - the proxy 使用JDK动态代理的方式创建代理对象 */ public static PreparedStatement newInstance(PreparedStatement stmt, Log statementLog, int queryStack) { InvocationHandler handler = new PreparedStatementLogger(stmt, statementLog, queryStack); ClassLoader cl = PreparedStatement.class.getClassLoader(); return (PreparedStatement) Proxy.newProxyInstance(cl, new Class[]{PreparedStatement.class, CallableStatement.class}, handler); } /** * Return the wrapped prepared statement. * * @return the PreparedStatement */ public PreparedStatement getPreparedStatement() { return statement; }} ResultSetLogger 中封装了ResultSet对象,也继承了BaseldbcLogger抽象类并实现了InvocationHandler 接口。ResultSetLogger中定义的字段如下: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124package org.apache.ibatis.logging.jdbc;import java.lang.reflect.InvocationHandler;import java.lang.reflect.Method;import java.lang.reflect.Proxy;import java.sql.ResultSet;import java.sql.ResultSetMetaData;import java.sql.SQLException;import java.sql.Types;import java.util.HashSet;import java.util.Set;import org.apache.ibatis.logging.Log;import org.apache.ibatis.reflection.ExceptionUtil;/** * ResultSet proxy to add logging * * @author Clinton Begin * @author Eduardo Macarron * */public final class ResultSetLogger extends BaseJdbcLogger implements InvocationHandler { //记录了超大长度的类型 private static Set<Integer> BLOB_TYPES = new HashSet<>(); //是否是ResultSet结果集的第一行 private boolean first = true; //统计行数 private int rows; private final ResultSet rs; //记录了超大字段的列编号 private final Set<Integer> blobColumns = new HashSet<>(); static { //所有超大长度的类型 BLOB_TYPES.add(Types.BINARY); BLOB_TYPES.add(Types.BLOB); BLOB_TYPES.add(Types.CLOB); 
BLOB_TYPES.add(Types.LONGNVARCHAR); BLOB_TYPES.add(Types.LONGVARBINARY); BLOB_TYPES.add(Types.LONGVARCHAR); BLOB_TYPES.add(Types.NCLOB); BLOB_TYPES.add(Types.VARBINARY); } @Override public Object invoke(Object proxy, Method method, Object[] params) throws Throwable { try { if (Object.class.equals(method.getDeclaringClass())) { return method.invoke(this, params); } Object o = method.invoke(rs, params); if (\"next\".equals(method.getName())) { //是否还有下一行 if ((Boolean) o) { rows++; if (isTraceEnabled()) { ResultSetMetaData rsmd = rs.getMetaData(); //获取数据集的列数 final int columnCount = rsmd.getColumnCount(); if (first) { first = false; //输出表头,并填充超大长度的类型到集合中 printColumnHeaders(rsmd, columnCount); } //输出该行记录,注意会过滤掉blobColumns中记录的列, //这些列的数据较大,不会输出到日志 printColumnValues(columnCount); } } else { //遍历ResultSet之后输出总,完例子:Total: 1 debug(\" Total: \" + rows, false); } } clearColumnInfo();//清空BaseJdbcLogger的三个column*集合 return o; } catch (Throwable t) { throw ExceptionUtil.unwrapThrowable(t); } } //例子:Header: [count(*)] private void printColumnHeaders(ResultSetMetaData rsmd, int columnCount) throws SQLException { StringBuilder row = new StringBuilder(); row.append(\" Columns: \"); for (int i = 1; i <= columnCount; i++) { if (BLOB_TYPES.contains(rsmd.getColumnType(i))) { blobColumns.add(i); } String colname = rsmd.getColumnLabel(i); row.append(colname); if (i != columnCount) { row.append(\", \"); } } trace(row.toString(), false); } //例子:Row: 39 private void printColumnValues(int columnCount) { StringBuilder row = new StringBuilder(); row.append(\" Row: \"); for (int i = 1; i <= columnCount; i++) { String colname; try { if (blobColumns.contains(i)) { colname = \"<<BLOB>>\"; } else { colname = rs.getString(i); } } catch (SQLException e) { // generally can't call getString() on a BLOB column colname = \"<<Cannot Display>>\"; } row.append(colname); if (i != columnCount) { row.append(\", \"); } } trace(row.toString(), false); 
}...}","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"Mybatis技术内幕源码解析:类型转换","date":"2019-12-20T12:40:00.000Z","path":"2019/12/20/Mybatis技术内幕-第二章:2-3类型转换/","text":"2.3、类型转换 JDBC数据类型与Java语言中的数据类型并不是完全对应的,所以在PreparedStatement为SOL语句绑定参数时,需要从Java类型转换成JDBC类型,而从结果集中获取数据时,则需要从JDBC类型转换成Java类型。MyBatis使用类型处理器完成上述两种转换。 upload successful 2.3.1、TypeHandler Mybatis当中所有类型转化器都继承BaseTypeHandler,而BaseTypeHandler又实现了TypeHandler接口。接口定义了四个方法,分成两类:setParameter()方法负责将数据由JdbcType 类型转换成Java类型;getResult()方法及其重载负责将数据由Java类型转换成JdbcType类型。 1234567891011121314151617181920212223242526package org.apache.ibatis.type;import java.sql.CallableStatement;import java.sql.PreparedStatement;import java.sql.ResultSet;import java.sql.SQLException;/** * @author Clinton Begin */public interface TypeHandler<T> { //在通过Preparedstatement为SQL语句绑定参数时,会将数据由JdbcType类型转换成Java类型 void setParameter(PreparedStatement ps, int i, T parameter, JdbcType jdbcType) throws SQLException; /** * @param columnName Colunm name, when configuration <code>useColumnLabel</code> is <code>false</code> */ //从ResultSet中获取数据,通过字段名称将数据由Java类型转换成JdbcType类型 T getResult(ResultSet rs, String columnName) throws SQLException; //从ResultSet中获取数据,通过字段下标(同上面getResult) T getResult(ResultSet rs, int columnIndex) throws SQLException; //CallableStatement中通过下标获取结果 T getResult(CallableStatement cs, int columnIndex) throws SQLException;} 一般类型转换器适用于单个参数或者单个列值完成类型转换,大多数是直接调用PreparedStatement、ResultSet、CallableStatement的对应方法, 以org.apache.ibatis.type.StringTypeHandler为例子如下: 1234567891011121314151617181920212223242526272829public class StringTypeHandler extends BaseTypeHandler<String> { @Override public void setNonNullParameter(PreparedStatement ps, int i, String parameter, JdbcType jdbcType) throws SQLException { //调用PreparedStatement.setXXX(Type)绑定参数 ps.setString(i, parameter); } @Override public String getNullableResult(ResultSet rs, String columnName) throws SQLException { 
//ResultSet.getXXX(Type)(列名称)获取列值 return rs.getString(columnName); } @Override public String getNullableResult(ResultSet rs, int columnIndex) throws SQLException { //ResultSet.getXXX(Type)(列下标)获取列值 return rs.getString(columnIndex); } @Override public String getNullableResult(CallableStatement cs, int columnIndex) throws SQLException { return cs.getString(columnIndex); }} 2.3.2、TypeHandlerRegistry TypeHandlerRegistry管理所有的类型转化器,register()方法实现了注册功能,过程中会向下述字段集合添加TypeHandler。 1234567891011121314151617181920212223package org.apache.ibatis.type;import .../** * @author Clinton Begin * @author Kazuki Shimizu */public final class TypeHandlerRegistry { //在读取结果数据时候,依靠该集合映射从jdbcType转换成javaType, //而JdbcType类型为org.apache.ibatis.type.JdbcType枚举类型 private final Map<JdbcType, TypeHandler<?>> jdbcTypeHandlerMap = new EnumMap<>(JdbcType.class); //记录了Java类型向指定Jdbcrype转换时,需要使用的TypeHandler对象。例如;Java类型中的string可能 //转换成教据库的 char、varchar等多种类型,所以存在一对多关系 private final Map<Type, Map<JdbcType, TypeHandler<?>>> typeHandlerMap = new ConcurrentHashMap<>(); //Object类型TypeHandler处理类 private final TypeHandler<Object> unknownTypeHandler = new UnknownTypeHandler(this); //全部Java类型以及对应的TypeHandler private final Map<Class<?>, TypeHandler<?>> allTypeHandlersMap = new HashMap<>(); //空TypeHandler集合的标识 private static final Map<JdbcType, TypeHandler<?>> NULL_TYPE_HANDLER_MAP = Collections.emptyMap(); //枚举类型的TypeHandler集合 private Class<? 
extends TypeHandler> defaultEnumTypeHandler = EnumTypeHandler.class;}","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"Mybatis技术内幕源码解析:反射工具箱","date":"2019-12-19T13:24:00.000Z","path":"2019/12/19/Mybatis技术内幕-第二章:基础层/","text":"第二章:基础支持层 Mybatis以SqlSessionFactory为核心,通过SqlSessionFactoryBuilder解析xml配置文件或Configration实例构建出SqlSessionFactory的实例。 一、重要概念 命名空间(Namespaces):通常包名加类目组成完全限定名(com.MyMapper.selectAll),实现语句隔离确定唯一性。 作用域(Scope)和生命周期 SqlSessionFactoryBuilder:完成创建SqlSessionFactory后就不再需要。 SqlSessionFactory:最佳作用域是应用作用域,最优解是使用单例模式或者静态单例模式。 SqlSession:最佳的作用域是请求或方法作用域 2.2、反射工具箱 Mybatis进行参数处理、结果映射会涉及到大量的反射操作。Java反射功能虽然强大,但是代码复杂易错,所以在mybaits源码包org.apache.ibatis.reflection有专门的反射模块。 2.2.1 Reflector&ReflectorFactory Reflector是反射模块的基础类,一个reflecotr实例对应一个类的元信息。根据Java Bean规范,封装的对getter、setter属性方法映射。 Reflector成员字段分析如下: 12345678910111213141516171819202122232425262728293031package org.apache.ibatis.reflection;import ....(省略导入类)/** * This class represents a cached set of class definition information that * allows for easy mapping between property names and getter/setter methods. 
* * @author Clinton Begin */public class Reflector { //对应的class类型 private final Class<?> type; //getter方法对应的属性名称数组 private final String[] readablePropertyNames; //setter方法对应的属性名称数组 private final String[] writablePropertyNames; //属性对应的setter方法集合,key是属性名称,value是Invoker对象 private final Map<String, Invoker> setMethods = new HashMap<>(); //属性对应的getter方法集合,key是属性名称,value是Invoker对象 private final Map<String, Invoker> getMethods = new HashMap<>(); //记录了属性相应的setter方法的参数值类型,key是属性名称,value是setter方法的参数类型 private final Map<String, Class<?>> setTypes = new HashMap<>(); //记录了属性相应的getter方法的参数值类型,key是属性名称,value是setter方法的参数类型 private final Map<String, Class<?>> getTypes = new HashMap<>(); //记录默认构造器 private Constructor<?> defaultConstructor; //所有属性名称集合 private Map<String, String> caseInsensitivePropertyMap = new HashMap<>(); } ReflectorFactory接口定义了Reflector对象创建或者缓存 12345678910package org.apache.ibatis.reflection;public interface ReflectorFactory { //确定是否需要缓存该Reflector对象 boolean isClassCacheEnabled(); //设置是否缓存该Reflector对象 void setClassCacheEnabled(boolean classCacheEnabled); //刘建指定Class对应的Reflector对象 Reflector findForClass(Class<?> type);} DefaultReflectorFactory默认实现ReflectorFactory,而CustomReflectorFactory继承DefaultReflectorFactory并且空实现,其关系图如下 DefaultReflectorFactory 12345678910111213141516171819202122232425262728293031323334353637package org.apache.ibatis.reflection;import java.util.concurrent.ConcurrentHashMap;import java.util.concurrent.ConcurrentMap;public class DefaultReflectorFactory implements ReflectorFactory { //该字段决定是否开启对Reflector对象 private boolean classCacheEnabled = true; //使用ConcurrentMap集合实现对Reflector对象的缓存 private final ConcurrentMap<Class<?>, Reflector> reflectorMap = new ConcurrentHashMap<>(); public DefaultReflectorFactory() { } @Override public boolean isClassCacheEnabled() { return classCacheEnabled; } @Override public void setClassCacheEnabled(boolean classCacheEnabled) { this.classCacheEnabled = classCacheEnabled; } @Override public Reflector findForClass(Class<?> 
type) { if (classCacheEnabled) {//检测是否开启缓存 // synchronized (type) removed see issue #461 //通过线程安全ConcurrentHashMap获取缓存Reflector,没有则通过lambada调用构造新建对象 return reflectorMap.computeIfAbsent(type, Reflector::new); } else { //未开启缓存则直接new Reflector对象 return new Reflector(type); } }}","tags":[{"name":"Mybatis","slug":"Mybatis","permalink":"https://caochikai.github.io/tags/Mybatis/"}]},{"title":"HttpServletRequest流重复读","date":"2019-12-18T12:30:00.000Z","path":"2019/12/18/HttpServletRequest流重复读/","text":"HttpServletRequest流重复读 springmvc controller @RequestBody接受参数报错,原因为http POST请求报文体为二进制流,在HttpServletRequest.getInputStream()中流只能被读取一次,重复读取会报如下: 1getRead() has already been called for this request/getInputStream() has already been called for this request 一、解决方式 第一种方式:重写HttpServletRequestWrapper 将InputStream 替换成可重复读的ByteArrayInputStream,原理就是在Filter或者springmvc的interceptor中通过构造器包装HttpServletRequest,并且把当前流缓存起来。 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101import javax.servlet.ReadListener;import javax.servlet.ServletInputStream;import javax.servlet.ServletRequest;import javax.servlet.http.HttpServletRequest;import javax.servlet.http.HttpServletRequestWrapper;import java.io.BufferedReader;import java.io.ByteArrayInputStream;import java.io.ByteArrayOutputStream;import java.io.IOException;import java.io.InputStream;import java.io.InputStreamReader;import java.nio.charset.Charset;/** * 自定义HttpServletRequestWrapper * 解决InputStream不能重复读问题 * */public class BufferedServletRequestWrapper extends HttpServletRequestWrapper { private final byte[] body; BufferedServletRequestWrapper(HttpServletRequest request) throws IOException { super(request); String sessionStream = getBodyString(request); body = sessionStream.getBytes(Charset.forName(\"UTF-8\")); } /** * 获取请求Body */ private String getBodyString(final ServletRequest request) { StringBuilder sb = 
new StringBuilder(); InputStream inputStream = null; BufferedReader reader = null; try { inputStream = cloneInputStream(request.getInputStream()); reader = new BufferedReader(new InputStreamReader(inputStream, Charset.forName(\"UTF-8\"))); String line = \"\"; while ((line = reader.readLine()) != null) { sb.append(line); } } catch (IOException e) { e.printStackTrace(); } finally { if (inputStream != null) { try { inputStream.close(); } catch (IOException e) { e.printStackTrace(); } } if (reader != null) { try { reader.close(); } catch (IOException e) { e.printStackTrace(); } } } return sb.toString(); } /** * 复制输入流 */ private InputStream cloneInputStream(ServletInputStream inputStream) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int len; try { while ((len = inputStream.read(buffer)) > -1) { byteArrayOutputStream.write(buffer, 0, len); } byteArrayOutputStream.flush(); } catch (IOException e) { e.printStackTrace(); } return new ByteArrayInputStream(byteArrayOutputStream.toByteArray()); } @Override public BufferedReader getReader() throws IOException { return new BufferedReader(new InputStreamReader(getInputStream())); } @Override public ServletInputStream getInputStream() throws IOException { final ByteArrayInputStream bais = new ByteArrayInputStream(body); return new ServletInputStream() { @Override public int read() throws IOException { return bais.read(); } }; }} 第二种方式:Springmvc提供了解决方案ContentCachingRequestWrapper,思路也是一样,只不过代码更加严谨点。源码我就不贴了,开头注释说明贴一下,然后需要注意事项和正确的使用方式如下: 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354/** *ContentCachingRequestWrapper源码注释如下 * {@link javax.servlet.http.HttpServletRequest} wrapper that caches all content read from * the {@linkplain #getInputStream() input stream} and {@linkplain #getReader() reader}, * and allows this content to be retrieved via a {@link #getContentAsByteArray() byte array}. * * <p>Used e.g. 
by {@link org.springframework.web.filter.AbstractRequestLoggingFilter}. * * @author Juergen Hoeller * @author Brian Clozel * @since 4.1.3 * @see ContentCachingResponseWrapper */package com.qm.interceptor;import org.springframework.web.util.ContentCachingRequestWrapper;import javax.servlet.*;import javax.servlet.http.HttpServletRequest;import java.io.IOException;/** * 配置哪些请求可以进行重复读数据 * */public class cachingRequestBodyFilter implements Filter { @Override public void init(FilterConfig filterConfig) throws ServletException { } @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { // 防止流读取一次后就没有了, 所以需要将流继续写出去 HttpServletRequest httpServletRequest = (HttpServletRequest) request; String requestURI = httpServletRequest.getRequestURI(); // 这里将原始request传入,读出流并存储 //PATH 为可重复读的路径开始或者接受部分 例如:caching.do if (requestURI.endsWith(\"caching.do\")) { // 这里将原始request传入,读出流并存储 ContentCachingRequestWrapper requestWrapper = new ContentCachingRequestWrapper(httpServletRequest); // 这里将原始request替换为包装后的request,此后所有进入controller的request均为包装后的request chain.doFilter(requestWrapper, response); } else {// 不要覆盖所有的请求,防止覆盖其他人请求 chain.doFilter(request, response); } } @Override public void destroy() { }} 二、参考如下:HttpServletRequest数据流重复读问题","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"BigDecimal金额计算","date":"2019-12-16T16:26:58.000Z","path":"2019/12/17/BigDecimal金额计算/","text":"关于金额计算,通常有加减乘除,四舍五入等。 add()加法函数:要注意BigDecimal加法得到的结果为零,因为BigDecimal的加法需要一个值去接收,加法不会改变调用者自身的值。 subtract()减法函数:同加法; multiply()乘法函数:注意Double转BigDecimal,尽量用字符串的形式初始化。因为使用BigDecimal类构造方法传入double类型时,计算的结果是不精确的! 
divide()除法函数:避免抛出除零异常,方式将除运算尽量转换成等价的乘运算。 保留两位小数且四舍五入:value.setScale(2, BigDecimal.ROUND_HALF_UP); BigDecimal静态常量值,比如BigDecimal.ZERO等; 例子刨析:1234567891011121314151617181920212223242526BigDecimal num1 = new BigDecimal(0.005); BigDecimal num2 = new BigDecimal(1000000); BigDecimal num3 = new BigDecimal(-1000000); //尽量用字符串的形式初始化 BigDecimal num12 = new BigDecimal(\"0.005\"); BigDecimal num22 = new BigDecimal(\"1000000\"); BigDecimal num32 = new BigDecimal(\"-1000000\");//加法 BigDecimal result1 = num1.add(num2); BigDecimal result12 = num12.add(num22); //减法 BigDecimal result2 = num1.subtract(num2); BigDecimal result22 = num12.subtract(num22); //乘法 BigDecimal result3 = num1.multiply(num2); BigDecimal result32 = num12.multiply(num22); //绝对值 BigDecimal result4 = num3.abs(); BigDecimal result42 = num32.abs(); //除法 BigDecimal result5 = num2.divide(num1,20,BigDecimal.ROUND_HALF_UP); BigDecimal result52 = num22.divide(num12,20,BigDecimal.ROUND_HALF_UP); result全部输出结果,初始化建议使用String 结果 参考如下BigDecimal加减乘除计算","tags":[]},{"title":"Springboot+vue部署路由404","date":"2019-12-15T08:32:00.000Z","path":"2019/12/15/hello-world/","text":"问题背景vue单页面路由,刷新地址或者请求链接,都会404 user www; worker_processes auto; error_log /var/log/nginx/error.log; pid /run/nginx.pid; events { worker_connections 51200; multi_accept on; } http { include mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; #gzip on; gzip on; server { listen 80; root /www/app/; server_name gdhxy.cn; try_files $uri $uri/ /index.html; } server { listen 80; root /www/app/; server_name www.gdhxy.cn; try_files $uri $uri/ /index.html; } server { listen 80; server_name admin.gdhxy.cn; try_files $uri $uri/ /index.html; location / { proxy_redirect off; proxy_pass 
http://127.0.0.1:38806; } } }解决方式 1、nginx配置:try_files $uri $uri/ /index.html;2、springboot指定404到index.html. import org.springframework.boot.web.servlet.error.ErrorController; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.RequestMapping; import javax.servlet.http.HttpServletRequest; @Controller public class MyErrorController implements ErrorController { @RequestMapping("/error") public String handleError(HttpServletRequest request) { //获取statusCode:404,重定向到首页 Integer statusCode = (Integer) request.getAttribute("javax.servlet.error.status_code"); if (statusCode == 404) { return "/index.html"; } else { return "/500"; } } @Override public String getErrorPath() { return "/error"; } }","tags":[{"name":"Spring","slug":"Spring","permalink":"https://caochikai.github.io/tags/Spring/"}]},{"title":"服务器配置记录","date":"2019-09-27T16:48:12.000Z","path":"2019/09/28/服务器配置记录/","text":"服务器配置记录一、背景 真实生产环境部署:nginx、tomcat配置https证书 二、nginx安装过程:123456789101112131415//一键安装上面四个依赖yum -y install gcc zlib zlib-devel pcre-devel openssl openssl-devel//下载tar包wget http://nginx.org/download/nginx-1.16.1.tar.gztar xzf nginx-1.16.1.tar.gz -C /usr/local//文件名改nginx-1.16.1成nginx//进入nginx目录cd /usr/local/nginx//关联编译https模块 ./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module//执行make命令编译源码make//执行make install安装可执行binmake install//新建logs(日志)和ssl(证书)文件夹 https浏览器影响——混合内容 解决Nginx反代Tomcat Http、Https混合内容报错,浏览器认为https请求中资源是http的css、js和图片都无法正常加载,造成无法双协议兼容! 12浏览器访问后开发者模式看到的报错信息:Mixed Content: The page at 'https://dashboard.domain.com/wire' was loaded over HTTPS, but requested an insecure stylesheet 'http://dashboard.domain.com/static/css/flickity.css'. This request has been blocked; the content must be served over HTTPS. 
nginx解决配置12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061user www;worker_processes auto;error_log /var/log/nginx/error.log;pid /run/nginx.pid;events { worker_connections 51200; multi_accept on;}http { include mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] \"$request\" ' '$status $body_bytes_sent \"$http_referer\" ' '\"$http_user_agent\" \"$http_x_forwarded_for\"'; access_log /var/log/nginx/access.log main; sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; #gzip on; gzip on; server { listen 80; listen 443 ssl; server_name chinaffxz.com; #charset koi8-r; ssl_certificate /usr/local/nginx/ssl/2879444_chinagzhxy.com.pem; ssl_certificate_key /usr/local/nginx/ssl/2879444_chinagzhxy.com.key; location / { proxy_pass http://127.0.0.1:xxxx/; proxy_redirect off; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; #解决兼容配置要点 proxy_set_header X-Forwarded-Proto $scheme; proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } }} Tomcat配置1234567891011121314151617181920212223242526272829303132<?xml version=\"1.0\" encoding=\"utf-8\"?><Server port=\"8006\" shutdown=\"SHUTDOWN\"> <Listener className=\"org.apache.catalina.core.JreMemoryLeakPreventionListener\" /> <Listener className=\"org.apache.catalina.mbeans.GlobalResourcesLifecycleListener\" /> <Listener className=\"org.apache.catalina.core.ThreadLocalLeakPreventionListener\" /> <Listener className=\"org.apache.catalina.core.AprLifecycleListener\" /> <GlobalNamingResources> <Resource name=\"UserDatabase\" auth=\"Container\" type=\"org.apache.catalina.UserDatabase\" description=\"User database that can be updated and saved\" 
factory=\"org.apache.catalina.users.MemoryUserDatabaseFactory\" pathname=\"conf/tomcat-users.xml\" /> </GlobalNamingResources> <Service name=\"Catalina\"> <Connector port=\"38080\" protocol=\"HTTP/1.1\" connectionTimeout=\"20000\" redirectPort=\"8443\" maxThreads=\"1000\" minSpareThreads=\"20\" acceptCount=\"1000\" maxHttpHeaderSize=\"65536\" debug=\"0\" disableUploadTimeout=\"true\" useBodyEncodingForURI=\"true\" enableLookups=\"false\" URIEncoding=\"UTF-8\" /> <Engine name=\"Catalina\" defaultHost=\"localhost\"> <!-- 解决兼容要点--> <Valve className=\"org.apache.catalina.valves.RemoteIpValve\" remoteIpHeader=\"X-Forwarded-For\" protocolHeader=\"X-Forwarded-Proto\" protocolHeaderHttpsValue=\"https\"/> <Realm className=\"org.apache.catalina.realm.LockOutRealm\"> <Realm className=\"org.apache.catalina.realm.UserDatabaseRealm\" resourceName=\"UserDatabase\" /> </Realm> <Host name=\"localhost\" appBase=\"webapps\" unpackWARs=\"true\" autoDeploy=\"true\"> <!-- Access log processes all example. Documentation at: /docs/config/valve.html Note: The pattern used is equivalent to using pattern=\"common\" --> <Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"logs\" prefix=\"localhost_access_log\" suffix=\".txt\" pattern=\"%h %l %u %t &quot;%r&quot; %s %b\" /> <Context path=\"\" docBase =\"mall\" debug=\"0\" reloadable=\"true\" crossContext=\"false\"/> </Host> </Engine> </Service></Server> git持续部署shell脚本 解决Linux CentOS中cp -f 复制强制覆盖的命令无效的方法,系统默认使用cp -i使用交互方式避免误操作,但在自动脚本中应当避免,推荐\\cp。 123456789101112#update codecd /root/dowload/mall/duoshanghugit fetch origin git pull > /root/dowload/mall/logs/mall_git.log &#package mvn package -Dmaven.test.skip=truesleep 2s#cp war to tomcat webapp\\cp -fr /root/dowload/mall/duoshanghu/target/mall.war /usr/local/env/tomcat/webapps/mall.warsleep 1s#restart.shsh /usr/local/env/tomcat/bin/restart.sh tomcat重启脚本1234567891011121314151617181920212223242526272829303132#!/bin/sh#初始化全局环境变量. 
/etc/profile#set java environmentexport CLASSPATH=$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib#查找tomcat的pidpid=`ps aux | grep tomcat | grep -v grep | grep -v Restart | grep -v restart | awk '{print $2}'`echo \"the tomcat pid is $pid\"#判断tomcat进程是否存在if [ -n \"$pid\" ];then sleep 1 pid=`ps aux | grep tomcat | grep -v grep | grep -v restart | grep -v Restart | awk '{print $2}'` if [ -n \"$pid\" ]; then sleep 1 echo \"tomcat进程将被杀?\" kill -9 $pid fi sleep 1 echo \"tomcat进程已经被杀死,先重新启动tomcat.\" service tomcat status sleep 1s service tomcat startelse echo \"tomcat进程不存在,先重新启动tomcat.\" service tomcat status sleep 1s service tomcat startfi maven编译完整依赖管理1、来源 解决webapp/WEB-INF/lib目录下的jar包无法用maven打包,且在linuxMaven编译报错[ERROR] Fatal Error: Unable to find package java.lang in classpath or bootclasspath,致命错误: 在类路径或引导类路径中找不到程序包 java.lang 2、解决方法Linux解决办法,使用maven自带的变量${path.separator}路径分隔符,原因是在Windows下是分号;,在linux下是冒号: 同时配置导入webapp/WEB-INF/lib和jdk的rt.jar、jce.jar,完美解决环境配置带来的无法package找不到依赖问题。 pom.xml:12345678910111213141516171819202122232425262728293031323334<build> <finalName>${artifactId}</finalName> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>1.8</source> <target>1.8</target> <compilerArguments> <verbose /> <bootclasspath>${java.home}/lib/rt.jar${path.separator}${java.home}/lib/jce.jar</bootclasspath> <extdirs>${basedir}/src/main/webapp/WEB-INF/lib</extdirs> </compilerArguments> </configuration> </plugin> </plugins> <resources> <resource> <directory>src/main/resources</directory> <includes> <include>**/*.*</include> </includes> <filtering>false</filtering> </resource> <resource> <directory>src/main/java</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> </includes> <filtering>false</filtering> </resource> </resources> </build> 参考解决Nginx反代Tomcat Http、Https混合内容报错 解决WEB-INF/lib目录下的jar包无法用maven打包 反馈与建议 当了组长面试加运维,对接一堆支付和物流、短信和推送账号,今天记录一下面向DevOps! 
markdown原文件在github里面,感谢各位大佬看官star,面试我要往脸上贴金哈哈哈😂。 邮箱:caochikai@qq.com,有问题发邮件。","tags":[{"name":"Linux","slug":"Linux","permalink":"https://caochikai.github.io/tags/Linux/"},{"name":"nginx","slug":"nginx","permalink":"https://caochikai.github.io/tags/nginx/"},{"name":"maven","slug":"maven","permalink":"https://caochikai.github.io/tags/maven/"},{"name":"tomcat","slug":"tomcat","permalink":"https://caochikai.github.io/tags/tomcat/"}]},{"title":"JVM记录","date":"2019-09-14T04:32:49.000Z","path":"2019/09/14/JVM记录/","text":"JVM记录一、错误 背景:自动答题爬虫由于买的腾讯云1核2G1M,Jenkins、springboot爬虫和火狐浏览器驱动。 知识背景:JDWP:调试网络协议(Java Debug Wire Protocol);调试线协议;jvmti:(Java Virtual Machine Tool Interface)jvm代理; 猜测:Selenium通过driver驱动Firefox浏览器,多次无法关闭浏览器造成内存无法释放最后溢出; 引用文章(有兴趣可深入了解):jvmti agent黑科技,阿里云云监控; 1FATAL ERROR in native method: JDWP Can't allocate jvmti memory, jvmtiError=JVMTI_ERROR_OUT_OF_MEMORY(110) 反馈与建议 立个flag:不定期更新,一更一周。 markdown原文件在github里面,感谢各位大佬看官star,面试我要往脸上贴金哈哈哈😂。 邮箱:caochikai@qq.com,有问题发邮件。","tags":[{"name":"spring","slug":"spring","permalink":"https://caochikai.github.io/tags/spring/"},{"name":"JVM","slug":"JVM","permalink":"https://caochikai.github.io/tags/JVM/"}]},{"title":"工具收藏——idea推荐插件","date":"2019-05-22T12:34:48.000Z","path":"2019/05/22/工具收藏——idea推荐插件/","text":"工具收藏——idea推荐插件一、概念 工欲善其事必先利其器,博主是个死忠工具派,为了解决一个大问题可能会收集多个工具和方案,然后求证对比出体验报告。后续文章有一大类就是工具类推荐,而本篇文章重点就是idea 安装插件记录,简要记录安装方法快速搭建个性化idea,还有一些关于UI方面插件可谓多不胜数,而且每个人口味不一,请各位自行选择——插件搜索技巧tags为Theme或者UI。 插件列表最强大插件卫冕之王——lambda表达式 名称 描述 JRebel 代替springboot dev热部署方案,最方便激活方式 Lombok 精简bean,各种功能强大又实用注解,搬砖人的MVP,结合Hutool实在完美 AceJump 光标跳跃,替代vim不二之选 MavenHelper 快速分析maven 包冲突的问题,搜索包名 MyBatis Log Plugin Restore the mybatis generate sql to original whole sql.(拼接完整sql) Log Support 2 快速log.info(),结合Lombok插件注解@Slf4j可以说无敌 Free Mybatis plugin Mybaits支持跳转,有钱大爷请收费版Mybatis plugin强大破解较少,差评 Rainbow Brackets 彩虹括号,多层嵌套代码显示助手 String Manipulation 各种各样字符串格式转化 RestfulToolkit 一套 RESTful 服务开发辅助工具集 Alibaba Cloud Toolkit 结合阿里云(非阿里也支持),多节点发布工具加强力linux客户端 
stackoverflow stackoverflow快速搜索bug插件 Translation 最强大的翻译插件,支持中文替换英文,解决起英文变量名难的重度患者 Key Promoter X 所有操作的快捷键提示,忘记鼠标真的 Cyan Light Theme A light theme,偏青色对眼睛很柔和舒服,黑暗主题实在不适应 反馈与建议 2012年java程序员可以说非常吃香,今年2019从业人数暴增,职业发展挑战变得越来越大!现在流行自动构建和自动部署CI,开发运维一体化docker,整个互联网都在追求敏捷开发的今天。掌握一款追求效率功能的IDE非常重要,很多群和公众号对ide和Eclipse争议很大。但请记住斯大林名言——落后就要挨打,ide本身代表高效,但是插件也别装太多,免得启动还要半天哈哈哈😀 (首推)慕课网免费教程:IntelliJ IDEA神器使用技巧 (推荐)尚硅谷IDEA视频教程:链接:https://pan.baidu.com/s/11biVBv9EI9yfL6Cee0r0LQ,密码:n7hn 看完上面两个教程,你会怀疑自己用的idea是假的,原来写代码还可以这样的。 邮箱:caochikai@qq.com","tags":[{"name":"tool","slug":"tool","permalink":"https://caochikai.github.io/tags/tool/"},{"name":"plugin","slug":"plugin","permalink":"https://caochikai.github.io/tags/plugin/"},{"name":"idea","slug":"idea","permalink":"https://caochikai.github.io/tags/idea/"}]},{"title":"springboot整合elasticsearch","date":"2019-05-21T13:40:31.000Z","path":"2019/05/21/springboot整合elasticsearch/","text":"springboot整合elasticsearch一、概念 elasticsearch官网是一个分布式多用户能力的全文搜索引擎,也是一个具有RESTful web接口的java应用。目前开源软件商业比较不错的例子,与Solr一样都是基于Lucene,大数据hadoop也是脱胎于Lucene。Solr开源而且生态比较成熟,elasticsearch目前最火也是商业应用方面非常好的搜索引擎。 3w原则: question what:常见站内/app内搜索服务需求:商品文章的模糊搜索,精确搜索,拼音搜索 。 question why:借助elasticsearch和analysis-ik中文分词器,快速实现搜索服务功能。 how:在微服务当中,通常利用mq消息中间件来同步数据集群搜索服务(脚手架里没有mq),借助ElasticsearchTemplate(spring 模板工具类强大)API维护索引和搜索查询。 二、落地实现 根据码云企业级搜索脚手架的文档可知,注意版本为Springboot2.1.1+elasticsearch6.5.3,elasticsearch和analysis-ik插件版本必须统一,而且新版本elasticsearch 7不适用于该工程。这个参考工程的中文分词搜索效果不太理想,一般富文本的内容进入索引之前要利用字符过滤器清洗不正常的字符。通常为了保证索引时覆盖度和搜索时准确度,索引分词器采用ik_max_word,搜索分析器采用ik_smart模式。具体elasticsearch6.5.3的安装过程请参考码云的README.md,目前正在在公司项目使用请放心,单元测试的效果也非常nice。 1、添加依赖1234<dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-data-elasticsearch</artifactId></dependency> 2、配置引导类12345#============================# 默认的节点名称elasticsearchspring.data.elasticsearch.cluster-name=my-application# elasticsearch 调用地址,多个使用“,”隔开spring.data.elasticsearch.cluster-nodes=localhost:9300 
3、DWMQSender包装rabbitTemplate发送消息同步数据123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778//服务类的简写如下{//注入sender @AutowiredDWMQSender sender;//发送写法ArticlesMessage extends DWMQMessage<消息内容类型>JSONObject jsonObject = JSON.parseObject(JSON.toJSONString(articles)); jsonObject.put(Groups.ACTION, Groups.ADD); sender.sendMessage(new ArticlesMessage(jsonObject));}//封装发送mq messageimport com.alibaba.fastjson.JSON;import com.dwalk.common.exception.EU;import com.dwalk.common.mq.mto.DWMQMessage;import com.dwalk.common.utils.SU;import com.rabbitmq.client.Channel;import lombok.extern.slf4j.Slf4j;import org.slf4j.Logger;import org.slf4j.LoggerFactory;import org.springframework.amqp.AmqpException;import org.springframework.amqp.core.Message;import org.springframework.amqp.core.MessagePostProcessor;import org.springframework.amqp.rabbit.annotation.RabbitHandler;import org.springframework.amqp.rabbit.core.RabbitTemplate;import org.springframework.amqp.rabbit.support.CorrelationData;import org.springframework.beans.factory.annotation.Autowired;import org.springframework.stereotype.Component;import java.time.LocalDateTime;import java.util.concurrent.atomic.AtomicLong;/** * 消息成功发送到MQ服务器后的回调确认 */@Slf4j@Componentpublic class DWMQSender { @Autowired RabbitTemplate rabbitTemplate; @Autowired DWMQRetry retry; // public void sendMessage(DWMQMessage mto) { if( mto.getObj()==null ) { EU.te(\"消息内容为空\"); } if( SU.isNull( mto.getRoutingKey()) ) { EU.te(\"路由规则为空\"); } log.info(String.format(\"准备发送【%s】\", mto.getInfo())); mto.setId(retry.generateId()); if( SU.isNull(mto.getExchange()) && mto.getExpire()<1 ) { //默认的没有交换器,则直接发送到指定的队列 rabbitTemplate.convertAndSend(mto.getRoutingKey(), mto.getObj(), new CorrelationData(mto.getId())); } else if (mto.getExpire()<1){ //经过交换器,按路由规则匹配队列 rabbitTemplate.convertAndSend(mto.getExchange(), mto.getRoutingKey(), mto.getObj(), new CorrelationData(mto.getId())); } else { 
//默认的没有交换器,则直接发送到指定的队列,发送延迟消息 rabbitTemplate.convertAndSend(mto.getRoutingKey(), mto.getObj(), message -> { message.getMessageProperties().setExpiration(mto.getExpire()+\"\"); return message; }, new CorrelationData(mto.getId())); } mto.setCtime(System.currentTimeMillis()); retry.add(mto, this); }} 4、RabbitListener接收到消息同步数据123456789101112131415161718192021222324252627282930313233343536373839/** * 搜索服务接收到管理后台用户修改文章同步消息 */@Component@Slf4j//配置mq消息队列,接收文件同步消息@RabbitListener(queues = DirectMQConfig.DIRECT_ARTICLES_ELASTIC_QUEUE)public class ArticlesReceiver extends DWMQBaseReceiver<String> { @Autowired private ArticleETOService etoService; @Override public Class getClazz() { return String.class; } @Override public boolean processMessage(String mto) { log.info(\"Received 管理后台用户-修改文章同步消息接收:{}\", mto); JSONObject jsonObject = JSON.parseObject(mto); String action = jsonObject.getString(Groups.ACTION); //删除操作要删除索引,更新操作先删除后 ArticleETO articles = JSON.parseObject(jsonObject.toJSONString(), ArticleETO.class); String articlesId = articles.getId(); switch (action) { case Groups.ADD: etoService.save(articles); break; case Groups.UPDATE: etoService.delete(articlesId); etoService.save(articles); break; case Groups.DELETE: etoService.delete(articlesId); break; } return true; }} 5、公共搜索方法12345678910111213141516171819202122232425262728/** * 高亮显示,返回分页 * @auther: zhoudong * @date: 2018/12/18 10:29 */ @Override public IPage<Map<String, Object>> queryHitByPage(int pageNo, int pageSize, String keyword, String indexName, String... fieldNames) { // 构造查询条件,使用标准分词器. 
QueryBuilder matchQuery = createQueryBuilder(keyword, fieldNames); // 设置高亮,使用默认的highlighter高亮器 HighlightBuilder highlightBuilder = createHighlightBuilder(fieldNames); // 设置查询字段 SearchResponse response = elasticsearchTemplate.getClient().prepareSearch(indexName) .setQuery(bool) .highlighter(highlightBuilder) .setFrom((pageNo - 1) * pageSize) .setSize(pageNo * pageSize) // 设置一次返回的文档数量,最大值:10000 .get(); // 返回搜索结果 SearchHits hits = response.getHits(); Long totalCount = hits.getTotalHits(); IPage<Map<String, Object>> page = new Page<>(pageNo, pageSize, totalCount); page.setRecords(getHitList(hits)); return page; } 反馈与建议 今天终于出了elasticsearch文章,以后我会在对应专题的文章放出相关的百度云资源,这些都是网上流传比较广的资源,想找好的学习资源也可以与我合伙买绝版视频,有钱买正版吧(作者很穷,找工作从来没有造假包装,只能混成这个卵样😭,世道维艰,如果不是感觉做码农还算有点天赋,早就转行了)。 百度云 :下载街/01.Elasticsearch顶尖高手系列课程,密码:iw7f 邮箱:caochikai@qq.com","tags":[{"name":"spring","slug":"spring","permalink":"https://caochikai.github.io/tags/spring/"},{"name":"elasticsearch","slug":"elasticsearch","permalink":"https://caochikai.github.io/tags/elasticsearch/"}]},{"title":"Springboot整合Quartz定时器","date":"2019-05-20T12:25:37.000Z","path":"2019/05/20/Spring boot整合Quartz定时器/","text":"Spring boot整合Quartz定时器一、概念 quartz官网是一个完全由 Java 编写的开源作业调度框架,结合数据库甚至可以做到分布式调度。目前参考的RuoYi后台脚手架的定时任务模块,支持在线(添加、修改、删除)任务调度,并记录执行日志作业结果。 3w原则: question what:定时任务,比如定时答题、商家结算等需求,并且支持立即运行、暂停和禁止。 question why:借助quartz springboot生态和后台脚手架,快速实现定时调度功能。 how:实现JobDetail运行任务详情,Trigger 触发器定义触发规则,Scheduler 调度中心/容器注册多个 JobDetail 和 Trigger。Trigger 与 JobDetail 组合即可被Scheduler调用。 二、落地实现 根据若依脚手架的文档可知,定时任务工程模块为ruoyi-quartz,结合sql/quartz.sql导入关于定时器数据库表。当然这种做法需要数据库和bootstrap,为了简化,我采取的替代方案是保留定时和立即执行功能,抛弃手动在代码硬编码新加定时器,web管理面板则通过swagger触发任务调度立即执行一次。极端偷懒方式,@Scheduled(cron = “”)放在在cotroller方法,同事推荐给我的😀。 1、添加依赖1234<dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-quartz</artifactId></dependency> 2、配置引导类1234567//模块加载@EnableScheduling@EnableSwagger2@SpringBootApplicationpublic class WeixinApplication {......} 
3、注册JobDetail和Trigger1234567891011121314151617181920212223/** * 在线表达式:http://cron.qqe2.com/ */@Slf4j@Configurationpublic class QuartzConfig { public static final String TASK_CLASS_NAME = \"reportNowTask\"; @Bean public JobDetail reportNowTask() { return JobBuilder.newJob(reportNowTask.class).withIdentity(TASK_CLASS_NAME).storeDurably().build(); } @Bean public Trigger reportNowTaskTrigger(JobDetail reportNowTask) { //cronSchedule等于@Scheduled(cron = \"\"),但是通过注解无法配置jobkey return TriggerBuilder.newTrigger().forJob(reportNowTask) .withIdentity(\"reportNowTaskTrigger\") .withSchedule(CronScheduleBuilder.cronSchedule(\"9 0 19 * * ?\")) .build(); }} 4、任务详情继承QuartzJobBean或者实现Job接口12345678@Slf4jpublic class reportNowTask extends QuartzJobBean { @Override protected void executeInternal(JobExecutionContext context) throws JobExecutionException { ......//任务内容 }} 5、立即执行123456789101112131415161718@Slf4j@Api(tags = \"问题模块\")@RequestMapping(value = \"question\")@RestControllerpublic class QuestionController { /** * 任务调度立即执行一次 */ @PostMapping(\"/run\") @ResponseBody public ResponseEntity run() throws SchedulerException { //api秘诀就在这里根据QuartzConfig jobKey触发作业调度 scheduler.triggerJob(JobKey.jobKey(QuartzConfig.TASK_CLASS_NAME)); return ResponseEntity.ok(\"执行成功!\"); }} 反馈与建议 今天粗略简易版的定时任务,功能强大的请查看若依后台脚手架,github和码云有很多类似脚手架,但是我们有选择性copy学习才是重点😊。 邮箱:caochikai@qq.com","tags":[{"name":"spring","slug":"spring","permalink":"https://caochikai.github.io/tags/spring/"},{"name":"quartz","slug":"quartz","permalink":"https://caochikai.github.io/tags/quartz/"}]},{"title":"springboot源码分析之启动过程","date":"2019-05-19T13:50:35.000Z","path":"2019/05/19/springboot源码分析之启动过程/","text":"springboot源码分析之启动过程一、概念 计划写一波springboot 2.x源码分析,只写实用性比较高的特性,从GitHub上看出更新频率在一个月左右,更新极快非常活跃。 版本发行: 版本 时间线 说明 v0.5.0.M1 2013-08-06 第一个版本 v2.2.0.M3 2019-05-15 当前最新版本 二、源码分析 SpringBoot的启动引导类写法多样,标记了@SpringBootApplication的class作为源类,代码如下: 
1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253//简易版@SpringBootApplication public class MyApplication { public static void main(String[] args) { SpringApplication.run(MyApplication.class, args); } }//通过 SpringApplicationBuilder API@SpringBootApplicationpublic class DiveInSpringBootApplication { public static void main(String[] args) { new SpringApplicationBuilder(DiveInSpringBootApplication.class) .bannerMode(Banner.Mode.CONSOLE) .web(WebApplicationType.NONE) .profiles(\"prod\") .headless(true) .run(args); }}//声明newpublic class SpringApplicationBootstrap { public static void main(String[] args) {// SpringApplication.run(ApplicationConfiguration.class,args); Set sources = new HashSet(); // 配置Class 名称 sources.add(ApplicationConfiguration.class.getName()); SpringApplication springApplication = new SpringApplication(); //配置源 springApplication.setSources(sources); //配置控制台banner springApplication.setBannerMode(Banner.Mode.CONSOLE); //声明web类型 springApplication.setWebApplicationType(WebApplicationType.NONE); //多环境配置激活 springApplication.setAdditionalProfiles(\"prod\"); //java.awt.headless禁用模式 springApplication.setHeadless(true); springApplication.run(args); } @SpringBootApplication public static class ApplicationConfiguration { }} 从代码上可以看出,调用了SpringApplication的静态方法run。这个run方法会构造一个SpringApplication的实例,然后再调用这里实例的run方法就表示启动SpringBoot。因此,想要分析SpringBoot的启动过程,我们需要熟悉SpringApplication的构造过程以及SpringApplication的run方法执行过程即可。 SpringApplication的准备过程 配置 Spring Boot Bean 源:Java 配置 Class 或 XML 上下文配置文件集合,用于 Spring Boot BeanDefinitionLoader 读取 ,并且将配置源解析加载为Spring Bean 定义。 推断 Web 应用类型:根据当前应用 ClassPath 中是否存在相关实现类来推断 Web 应用的类型。参考方法:org.springframework.boot.SpringApplication#deduceWebApplicationType。 123456789101112private WebApplicationType deduceWebApplicationType() { if (ClassUtils.isPresent(REACTIVE_WEB_ENVIRONMENT_CLASS, null) && !ClassUtils.isPresent(MVC_WEB_ENVIRONMENT_CLASS, null)) { return WebApplicationType.REACTIVE; } for (String 
className : WEB_ENVIRONMENT_CLASSES) { if (!ClassUtils.isPresent(className, null)) { return WebApplicationType.NONE; } } return WebApplicationType.SERVLET;} 推断引导类(Main Class):根据 Main 线程执行堆栈判断实际的引导类。参考方法: org.springframework.boot.SpringApplication#deduceMainApplicationClass 123456789101112131415private Class deduceMainApplicationClass() { try {//获取堆栈输出方法名称 StackTraceElement[] stackTrace = new RuntimeException().getStackTrace(); for (StackTraceElement stackTraceElement : stackTrace) { if (\"main\".equals(stackTraceElement.getMethodName())) { return Class.forName(stackTraceElement.getClassName()); } } } catch (ClassNotFoundException ex) { // Swallow and continue } return null; } 加载应用上下文初始器 ( ApplicationContextInitializer ):利用 Spring 工厂加载机制,实例化 ApplicationContextInitializer 实现类,并排序对象集合。 1234567891011121314//实现类: org.springframework.core.io.support.SpringFactoriesLoader//配置资源: META-INF/spring.factories//排序: AnnotationAwareOrderComparator#sortprivate Collection getSpringFactoriesInstances(Class type, Class[] parameterTypes, Object... 
args) { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); // Use names and ensure unique to protect against duplicates Set names = new LinkedHashSet<>( SpringFactoriesLoader.loadFactoryNames(type, classLoader)); List instances = createSpringFactoriesInstances(type, parameterTypes, classLoader, args, names); AnnotationAwareOrderComparator.sort(instances); return instances; }· 加载应用事件监听器( ApplicationListener ):利用 Spring 工厂加载机制,实例化 ApplicationListener 实现类,并排序对象集合。 SpringApplication 运行阶段 加载 SpringApplication 运行监听器( SpringApplicationRunListeners ):利用 Spring 工厂加载机制,读取 SpringApplicationRunListener 对象集合,并且封装到组合类SpringApplicationRunListeners。 运行 SpringApplication 运行监听器( SpringApplicationRunListeners ): started(run方法执行的时候立马执行;对应事件的类型是ApplicationStartedEvent) environmentPrepared(ApplicationContext创建之前并且环境信息准备好的时候调用;对应事件的类型是ApplicationEnvironmentPreparedEvent) contextPrepared(ApplicationContext创建好并且在source加载之前调用一次;没有具体的对应事件) contextLoaded(ApplicationContext创建并加载之后并在refresh之前调用;对应事件的类型是ApplicationPreparedEvent) finished(run方法结束之前调用;对应事件的类型是ApplicationReadyEvent或ApplicationFailedEvent) 创建 Spring 应用上下文( ConfigurableApplicationContext ):根据准备阶段的推断 Web 应用类型创建对应ConfigurableApplicationContext 实例: Web Reactive: AnnotationConfigReactiveWebServerApplicationContext Web Servlet: AnnotationConfigServletWebServerApplicationContext 非 Web: AnnotationConfigApplicationContext 创建 Environment:根据准备阶段的推断 Web 应用类型创建对应的 ConfigurableEnvironment 实例。 Web Reactive: StandardEnvironment Web Servlet: StandardServletEnvironment 非 Web: StandardEnvironment run方法分析1234567891011121314151617181920212223242526272829303132public ConfigurableApplicationContext run(String... 
args) { StopWatch stopWatch = new StopWatch(); // 构造一个任务执行观察器 stopWatch.start(); // 开始执行,记录开始时间 ConfigurableApplicationContext context = null; configureHeadlessProperty(); // 获取SpringApplicationRunListeners,内部只有一个EventPublishingRunListener SpringApplicationRunListeners listeners = getRunListeners(args); // 上面分析过,会封装成SpringApplicationEvent事件然后广播出去给SpringApplication中的listeners所监听 // 这里接受ApplicationStartedEvent事件的listener会执行相应的操作 listeners.started(); try { // 构造一个应用程序参数持有类 ApplicationArguments applicationArguments = new DefaultApplicationArguments( args); // 创建Spring容器 context = createAndRefreshContext(listeners, applicationArguments); // 容器创建完成之后执行额外一些操作 afterRefresh(context, applicationArguments); // 广播出ApplicationReadyEvent事件给相应的监听器执行 listeners.finished(context, null); stopWatch.stop(); // 执行结束,记录执行时间 if (this.logStartupInfo) { new StartupInfoLogger(this.mainApplicationClass) .logStarted(getApplicationLog(), stopWatch); } return context; // 返回Spring容器 } catch (Throwable ex) { handleRunFailure(context, listeners, ex); // 这个过程报错的话会执行一些异常操作、然后广播出ApplicationFailedEvent事件给相应的监听器执行 throw new IllegalStateException(ex); }} 反馈与建议 为了快大多分析的不好写的很乱,凑合看下我以后改下排版😂。 邮箱:caochikai@qq.com","tags":[{"name":"spring","slug":"spring","permalink":"https://caochikai.github.io/tags/spring/"},{"name":"quartz","slug":"quartz","permalink":"https://caochikai.github.io/tags/quartz/"}]},{"title":"地理定位业务实现","date":"2019-05-18T12:19:01.000Z","path":"2019/05/18/地理定位业务实现/","text":"地理定位业务实现一、概念3w原则: question what:a、附近一定范围的目标(电子围栏);b、该经纬度的地理位置名称(省市县街道)。 why:解决上述问题的本质是获得经纬度,途径为硬件、GPS定位服务、基站定位,地理位置通过百度、谷歌、腾讯地图,基本所有地图免费版都有日访问量限制。 how:通过安卓或者IOS获取经纬度,再借助百度地图接口获取地理位置,距离也可通过接口或者谷歌地图算法。 GPS是英文Global Positioning System(全球定位系统)的简称。 二、解决方式 场景:小程序获取附近的好友,微信官方文档 wx.getLocation(Object object)。 现象:需要用户授权,前端获得gps 坐标通过接口传数据后台,保存到用户表(1:1关系)。 地图选择:其实纯前端基本也能解决基本问题,腾讯地图对小程序支持最好,根据JavaScript SDK文档可以拥有如下功能:绘制地图,地点搜索,关键词输入提示,逆地址解析(坐标位置描述),地址解析(地址转坐标,路线规划,距离计算,获取城市列表,获取城市区县。 
业务前提:用户必须授权才能使用该功能,当拥有所有用户经纬度,通过数据库语句获取当前用户经纬度在一定距离,并且可以排行。 三、落地编码sql版例子:123456789101112131415161718192021222324252627#mysql版,根据谷歌地图公式计算点歌经纬度之间距离,单位为m(米)select e.id, e.longitude, e.latitude, ROUND( 6378.138 * 2 * ASIN( SQRT( POW( SIN( ( e.latitude * PI() / 180 - 23.12463 * PI() / 180 ) / 2 ), 2 ) + COS(e.latitude * PI() / 180) * COS(23.12463 * PI() / 180) * POW( SIN( ( e.longitude * PI() / 180 - 113.36189 * PI() / 180 ) / 2 ), 2 ) ) ) * 1000 ) AS distanceFROM dw_dbei_user ehaving distance < 4000 工具类(获取两点距离):12345678910111213141516171819202122232425262728293031323334public class MapUtils { //private static double EARTH_RADIUS = 6378.137; private static double EARTH_RADIUS = 6371.393; private static double rad(double d) { return d * Math.PI / 180.0; } /** * 计算两个经纬度之间的距离 * * @param lat1 纬度1 * @param lng1 经度1 * @param lat2 纬度2 * @param lng2 经度2 * @return 计算结果单位:米 */ public static double GetDistance(double lat1, double lng1, double lat2, double lng2) { double radLat1 = rad(lat1); double radLat2 = rad(lat2); double a = radLat1 - radLat2; double b = rad(lng1) - rad(lng2); double s = 2 * Math.asin(Math.sqrt(Math.pow(Math.sin(a / 2), 2) + Math.cos(radLat1) * Math.cos(radLat2) * Math.pow(Math.sin(b / 2), 2))); s = s * EARTH_RADIUS; s = Math.round(s * 1000); return s; } public static void main(String[] args) { double v = GetDistance(113.36199, 23.12463, 113.36189, 23.12463); System.out.println(v); }} 反馈与建议 尽量每天不断更,做个自律者,markdown原文件在github里面,感谢各位大佬看官star,面试我要往脸上贴金哈哈哈😂。 邮箱:caochikai@qq.com","tags":[{"name":"sql","slug":"sql","permalink":"https://caochikai.github.io/tags/sql/"},{"name":"im","slug":"im","permalink":"https://caochikai.github.io/tags/im/"}]},{"title":"初探缓存","date":"2019-05-17T15:34:21.000Z","path":"2019/05/17/初探缓存/","text":"多级缓存架构缓存设计理念: 缓存常用的对象或者数据,减少系统开销提高效率。 缓存命中率 即从缓存中读取数据的次数 与 总读取次数的比率,命中率越高越好: 缓存策略: 移除策略:FIFO(First In First Out),LRU(Least Recently Used),LFU(Least Frequently Used)。 TTL(Time To Live):缓存存活期 TTI(Time To Idle):空闲存活期 spring cache 一、概念 自Spring 
3.1起,提供注解缓存,并且提供事务回滚时也自动回滚缓存,并且支持SPEL表达式。 二、入门代码1、添加依赖,例如maven的pom.xml(Springboot); 1234<dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-cache</artifactId></dependency> 2、添加一种cacheManager的bean实现类,常见ConcurrentMapCache、EhCacheCache、RedisCache; 123456@Beanpublic CacheManager cacheManager() { SimpleCacheManager cacheManager = new SimpleCacheManager(); cacheManager.setCaches(Collections.singletonList(new ConcurrentMapCache("models"))); return cacheManager;} 3、配置模块加载注解@EnableCaching 三、主要注解1、@Cacheable:将方法返回值作为缓存 value (也可使用 cacheNames) : 可看做命名空间,表示存到哪个缓存里了。 key : 表示命名空间下缓存唯一key,使用Spring Expression Language(简称SpEL,详见参考文献[5])生成。 condition : 表示在哪种情况下才缓存结果(对应的还有unless,哪种情况不缓存),同样使用SpEL 2、@CacheEvict:删除缓存注解 3、@CachePut:刷新注解 ehcache 一、概念二、入门代码1、缓存分组,要对分组进行全新CacheConfiguration ,为了高效使用配置自定义属性提取器。默认的属性处理器是JavaBeanAttributeExtractor。 123456789101112131415161718192021222324252627282930@Bean public EhCacheGroupBeanPostProcessor addCache() { System.out.println(".......添加缓存组........"); return new EhCacheGroupBeanPostProcessor(); } //后置处理器 public static class EhCacheGroupBeanPostProcessor implements BeanPostProcessor { @Override public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { //根据前面初始化完成的beanName进一步操作 if(beanName.equals("appEhCacheCacheManager") ) { EhCacheCacheManager manager = (EhCacheCacheManager)bean; CacheManager cacheManager = manager.getCacheManager();// 文章缓存命中配置needUpdate CacheConfiguration configuration = new CacheConfiguration(ReadCacheNames.文章缓存,10000); Searchable searchable = new Searchable(); searchable.setKeys(false); searchable.setValues(false); //动态索引 searchable.setAllowDynamicIndexing(true); searchable.addSearchAttribute(new SearchAttribute().name("needUpdate").className("com.dwalk.social.common.util.ArticlesAttributeExtractor")); configuration.eternal(true).addSearchable(searchable); Cache articlesCache = new Cache(configuration); cacheManager.addCache(articlesCache); 
cacheManager.addCache(ReadCacheNames.热点文章缓存); } return bean; } } 2、使用spring内置定时器,并使用ehcache查询api进行缓存查询。 123456789101112131415161718192021222324 @Scheduled(cron = "0 0/1 * * * ?") private void synchronize() { Cache cache = cacheManager.getCache(ReadCacheNames.文章缓存); int size = cache.getSize(); if (size > 0) { Query query = cache.createQuery(); Attribute searchAttribute = cache.getSearchAttribute("needUpdate"); //指定查询的 query.includeAttribute(searchAttribute); query.includeValues(); Results execute = query.addCriteria(searchAttribute.eq(true)).execute(); List all = execute.all(); log.info("查询文章缓存的大小:{}", all.size()); for (Result result : all) { ArticlesDTO articles = (ArticlesDTO) result.getValue(); articles.setNeedUpdate(false); Articles target = new Articles();// 同步浏览量、视频播放量、评论数、点赞数、收藏数 target.setVisitorNum(articles.getVisitorNum()).setCommentNum(articles.getCommentNum()). setPlayNum(articles.getPlayNum()).setLikeNum(articles.getLikeNum()).setCollectNum(articles.getCollectNum()); articlesService.updateById(target); } } } 三、总结1、需要熟悉spring接口设计,以接口使用框架,要不然官方api使用需了解诸多细节。 四、一二级缓存 当遇到@Cacheable返回为null记录,为了成功序列化null,使用了org.springframework.cache.support.NullValue对象代替null。 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889com.alibaba.fastjson.JSONException: autoType is not support. 
org.springframework.cache.support.NullValue1、起源一二级缓存重写get()方法public class EhRedisCache extends AbstractValueAdaptingCache { 部分代码省略…… @Override public T get(Object key, Callable valueLoader) { try { lock.lock(); value = lookup(key); if(value != null) { return (T) value; } value = valueLoader.call(); //toStoreValue是AbstractValueAdaptingCache抽象类的方法 Object storeValue = toStoreValue(value); put(key, storeValue); return (T) value; } catch (Exception e) { throw new ValueRetrievalException(key, valueLoader, e.getCause()); } finally { lock.unlock(); } }}2、toStoreValue判断userValue == null 则return NullValue.INSTANCE -> public static final Object INSTANCE = new NullValue();public abstract class AbstractValueAdaptingCache implements Cache { protected Object toStoreValue(@Nullable Object userValue) { if (userValue == null) { if (this.allowNullValues) { return NullValue.INSTANCE; } throw new IllegalArgumentException( "Cache '" + getName() + "' is configured to not allow null values but null was provided"); } return userValue; }}3、序列化反序列化public class FastJsonRedisSerializer implements RedisSerializer { @Override public T deserialize(byte[] bytes) throws SerializationException { if (null == bytes || bytes.length <= 0) { return null; } String str = new String(bytes, DEFAULT_CHARSET); return (T) JSON.parseObject(str, clazz); }}4、jsonParser解析类型public class com.alibaba.fastjson.parser.DefaultJSONParser implements Closeable { public final Object parseObject(final Map object, Object fieldName) { 部分代码省略…… Class clazz = null; if (object != null && object.getClass().getName().equals(typeName)) { clazz = object.getClass(); } else { //com.alibaba.fastjson.parser.DefaultJSONParser#config执行 clazz = config.checkAutoType(typeName, null, lexer.getFeatures()); } }}5、异常抛出点TypeUtils.getClassFromMapping(typeName) -》 typeName为org.springframework.cache.support.NullValuepublic Class checkAutoType(String typeName, Class expectClass, int features) { if (Arrays.binarySearch(denyHashCodes, hash) >= 0 && 
TypeUtils.getClassFromMapping(typeName) == null) { throw new JSONException("autoType is not support. " + typeName); }}6、TypeUtils的getClassFromMapping方法返回null public static Class getClassFromMapping(String className){ return mappings.get(className); }7、TypeUtils不支持org.springframework.cache.support.NullValueprivate static ConcurrentMap> mappings = new ConcurrentHashMap>(16, 0.75f, 1);//mappings类型白名单private static void addBaseClassMappings(){ mappings.put("byte", byte.class); mappings.put("short", short.class); mappings.put("int", int.class); mappings.put("long", long.class); mappings.put("float", float.class); mappings.put("double", double.class); mappings.put("boolean", boolean.class); 部分代码省略…… fastJson官方没有支持org.springframework.cache.support.NullValue} 反馈与建议 今天复出写博客,一是感觉懒了期望进步,二是为了积累知识方便copy😂。 邮箱:caochikai@qq.com","tags":[{"name":"spring","slug":"spring","permalink":"https://caochikai.github.io/tags/spring/"}]},{"title":"AndroidAdb","date":"2017-02-05T12:49:09.000Z","path":"2017/02/05/AndroidAdb/","text":"Android adb调试工具–清除锁屏密码误解@(Android)[调试工具|实用教程|adb命令] Android Studio ADB官方文档 ADB全称为 Android Debug Bridge, 是android 里的一个调试工具, 用这个工具可以直接操作管理android模拟器或者真实的andriod设备手机。如果你安装了Android SDK(或者下载adb工具包,体积小),存放sdk的platform-tools目录下,在 命令行cmd使用需要配置路径(android_sdk/platform-tools/adb.exe )到环境变量里。它可为各种设备操作提供便利,如安装和调试应用,例如查看android中的数据库,提供对 Unix shell(可用来在模拟器或连接的设备上运行各种命令)的访问。该工具作为一个客户端-服务器程序,包括三个组件: 客户端 :该组件发送命令。客户端在开发计算机上运行。您可以通过发出 adb 命令从命令行终端调用客户端; 后台程序 :该组件在设备上运行命令。后台程序在每个模拟器或设备实例上作为后台进程运行; 服务器 :该组件管理客户端和后台程序之间的通信。服务器在开发计算机上作为后台进程运行。 关于清除解锁图案 用户相关的文件accounts.db(gmail账号管理),gesture.key(手势识别文件),password.key(密码文件)。不同品牌手机系统相关文件名也会不同,例如我的手机,华为4x文件为locksettings.db(数据库文件)。修理店里的师傅使用一个工具叫星海神器(高通平台强刷),功能超乎想象,几乎支持所有手机品牌(特别是苹果)淘宝上有卖,网上大部分加密过!小米手机叫丢失解锁神器! 1. 破解条件 手机打开USB并连接电脑 手机被ROOT,并且ADB可以直接升级为ROOT用户 配置adb路径到环境变量或者直接在cmd命令行里面切换到adb所在路径 2. 
破解步骤 *打开cmd命令行,用【adb shell】命令进入shell * 利用su命令将adb提升为root用户,如果成功,行首由$变成 #,#表示root用户 *进入data/system目录 * *ls查看当前目录 * *用ls命令查看密码文件 * 用rm命令删除密码文件,若是$(不是root),则会提示”rm failed for … Permission denied”,权限不足 12345$ adb shell $ su# cd /data/system# ls# rm locksettings.db 输入reboot或手动重启手机生效。亲测华为荣耀4X有效,删除锁屏密码后,指纹解锁自动失效,所以此方法也可以破解指纹解锁!!重新设置锁屏密码后,以前设置的指纹解锁又可以用了。 3. 注意事项看到这儿就明白了,即便手机Root+打开USB调试,也是无法通过ADB解锁手机的。因为想要想要解锁,就得删除/data/system 下的相关文件,可删除需要由Superuser或者kingroot授予ADB shell权限,而授权需要解锁打开手机后操作Superuser程序。即解锁需要用到解锁后的手机操作,就像春晚小品《开锁》中,业主黄宏要求开锁师傅林永健开锁,林永健要求黄宏出示有效证件,可证件就在锁着的箱子里头。 我进行了测试后发现,在授权过一次后,下次手机用USB数据线连接电脑,再次进行解锁,即便同台电脑,也是需要再次授权的。这就说明,即便你用你的电脑经过手机授权解锁过,过后想要在忘记密码时使用ADB方式解锁,也是不可能的。 我觉得这是高版本的安卓系统(eg. Android 4.2 Jelly Bean 安卓果冻豆)新有的安全特性,低版本的Android如安卓2.3.8是可以通过这种方法解锁的。因为我实际测试,我的固件版本为安卓2.3.8的三星S5570(已经Root,打开USB调试),执行命令rm gesture.key,无需授权,直接即可解锁。现在我有个问题,低版本安卓系统如 Android2.3.8的手机解锁屏幕锁定密码,是否的确必须Root,还是只要打开USB调试即可?我手头没有没Root的Android2.3手机,也懒得折腾了,就不管它了。 这样看来,高版本的安卓系统也就不存在被非手机所有者恶意解锁的BUG了。4. 题外话(今年好东西都挂了)收费音乐神器官网,服务器接口平台AnyListen 音乐间谍为window PC版,音乐助手Android版,Shelher微博分享出来3.3版源码百度云 密码gria!尴尬的是朋友云免流量也不干了!","tags":[]},{"title":"SpiderWebMagic","date":"2017-01-30T12:50:27.000Z","path":"2017/01/30/SpiderWebMagic/","text":"WebMagic爬虫框架–京东图书@(爬虫)[框架|爬虫|Demo总结] WebMagic!项目代码分为核心(webmagic-core)和扩展(webmagic-extension)两部分(jar包)。Downloader、PageProcessor、Scheduler、Pipeline这四大组件对应爬虫生命周期中的下载、处理、管理和持久化等功能。 名称 功能 Downloader 基础。利用httpClient作为下载工具,下载页面内容便于后续处理解析; Page 网页内容对象。 指根据url下载到的页面内容,包括页面dom元素,css样式,javascript等; Pageprocess 爬虫的核心。 负责解析页面,抽取有用信息,可采用css(),$(),xpath()方法对特定页面元素进行抽取; Site 网站设置。设置网站domain,cookies,header,重试次数,访问间隔时间等; Scheduler 抓取页面队列。 管理待抓取的URL,以及一些去重的工作,将目标url内容push到抓取队列中; Pipeline 输出,收尾。 负责抽取结果的处理,包括计算、持久化到文件、数据库; Spider 爬虫的入口类 采用链式设计,通过它来设定多线程,页面解析器,调度以及输出方式等。 WebMagic官方链接: 官网 包含官方文档和源码,以及相应的实例; github 仓库保存最新版本; oschinamayun码云 包含所有编译好的依赖包; 爬取京东图书(https://book.jd.com/)在商品列表网页抓取如下商品信息 商品名:商品名称 商品网页:显示商品详细信息的网页地址。 市场价格:京东给出的市面价格 京东价格:京东的优惠价。 
关于ajax价格链接地址格式:http://p.3.cn/prices/mgets?skuIds=J_ + 商品ID,抓取格式为json。 https://item.jd.com/12087016.html :图书详情页; http://p.3.cn/prices/mgets?skuIds=J_12087016 (例)价格ajax请求链接; [{“id”:”J_12087016”,”p”:”60.80”,”m”:”90.00”,”op”:”60.80”}] 抓取结果 id + 京东实际价格+市场价格 代码块123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778import us.codecraft.webmagic.Page;import us.codecraft.webmagic.Site;import us.codecraft.webmagic.Spider;import us.codecraft.webmagic.processor.PageProcessor;import java.util.ArrayList;import java.util.List;public class JDPageProcesser implements PageProcessor { private Site site = Site.me().setRetryTimes(3).setSleepTime(3000).setCharset(\"GBK\"); private static int size = 0;// 共抓取到的图书数量 //抓取商品信息集合 private static List<String> name = new ArrayList<String>();//所有的书名 private static List<String> author = new ArrayList<String>();//所有的作者 private static List<Double> prices = new ArrayList<Double>();//所有的价格 @Override public void process(Page page) { //图书主页 // https://item.jd.com/12004057.html if (!page.getUrl().regex(\"https://item.jd.com/\\\\d{8}.html\").match()&!page.getUrl().regex(\"p.3.cn/prices/mgets\").match()) { // 主页中添加商品详情页到计划url List<String> detail = page.getHtml().links().regex(\"//item.jd.com/\\\\d{8}.html\").replace(\"//\", \"https://\").all(); //控制抓取商品的数量 if (detail.size()>0) { for (int i = 0; i < 4; i++) { String url = detail.get(i); System.out.println(\"url:\" + url.replace(\"https://item.jd.com\", \"\").replace(\"/\", \"http://p.3.cn/prices/mgets?skuIds=J_\").replace(\".html\", \"\")); //列队添加一条详情页后面追加一条价格ajax链接 page.addTargetRequest(url); page.addTargetRequest(url.replace(\"https://item.jd.com\", \"\").replace(\"/\", \"http://p.3.cn/prices/mgets?skuIds=J_\").replace(\".html\", \"\")); } } } if (page.getUrl().regex(\"https://item.jd.com/\\\\d{8}.html\").match()) { // 商品详情页 size++; name.add(page.getHtml().xpath(\"//div[@id=name]/h1/text()\").get());//添加书名 
author.add(page.getHtml().xpath(\"//div[@id=p-author]/a/text()\").get());//添加作者 } if (page.getUrl().regex(\"p.3.cn/prices/mgets\").match()) { //ajax商品id对应价格json接口 prices.add(Double.parseDouble(page.getHtml().replace(\"&quot\", \"\").regex(\"p;:;.+;,;m\").regex(\"\\\\d+\\\\.\\\\d+\").get()));//添加价格 } } @Override public Site getSite() { return site; } //获取所有信息导入DAO,持久化层待实现 private static void getAll() { System.out.println(\"Size\" + name.size() + author.size() + prices.size()); for (int i = 0; i < name.size(); i++) { JDLog model = new JDLog(); model.setName(name.get(i)); model.setAuthor(author.get(i)); model.setPrices(prices.get(i)); System.out.println(\"书名:\" + model.getName()); System.out.println(\"作者:\" + model.getAuthor()); System.out.println(\"价格:\" + model.getPrices()); } } public static void main(String[] args) { long startTime, endTime; System.out.println(\"【爬虫开始】请耐心等待一大波数据到你碗里来...\"); startTime = System.currentTimeMillis(); // 从京东图书开始抓,开启5个线程,启动爬虫 Spider.create(new JDPageProcesser()).addUrl(\"https://book.jd.com/\").thread(3).run(); endTime = System.currentTimeMillis(); getAll(); System.out.println(\"【爬虫结束】共抓取\" + size + \"本图书,耗时约\" + ((endTime - startTime) / 1000) + \"秒,已保存到数据库,请查收!\"); }} 以后将会坚持更新!反馈与建议 邮箱:caochikai@qq.com","tags":[]},{"title":"My New Post","date":"2016-12-24T13:46:06.000Z","path":"2016/12/24/First Wirte/","text":"Markdown初次使用Markdown是一门轻量级语法,仅仅需要5分钟你就能搞懂! 期末刷jsp作业心得 DAO层封装成工具类 :持久层处理业务,尽量把变化东西采用数组遍历,约定成俗简化成配置化写; 登陆注册模块重用 :前端样式变化,但尽量不要修改提交参数名称和个数; 单元测试类 :使用 junit测试框架检查逻辑是否有误,方便部署调试 。 代码块1234567891011121314151617// 执行更新操作----带预编译参数public int excuteUpdate(String sql, String[] params) { int result = 0; try { pstmt = getCon().prepareStatement(sql); for (int i = 0; i < params.length; i++) { pstmt.setString(i + 1, params[i]); } result = pstmt.executeUpdate(); } catch (SQLException e) { e.printStackTrace(); } finally { pstmtClose(); conClose(); } return result;} ##以后将会持续更新,预告 webmagic 框架爬虫下次出 反馈与建议 邮箱:caochikai@qq.com","tags":[]}]