Setting Up a Seata Distributed Transaction Environment
Download the Seata server from https://github.com/seata/seata/releases and edit registry.conf. Nacos is used here as both the registry and the configuration center, so the server-side file.conf is not needed. Note that when using Nacos, the Nacos password must not contain special characters, otherwise Seata may fail to connect (fixed in 1.5.0).
registry {
type = "nacos"
nacos {
application = "seata-server"
serverAddr = "127.0.0.1:8848"
group = "SEATA_GROUP"
namespace = "e794b575-4231-4935-8271-145c5840d392"
cluster = "default"
username = "nacos"
password = "nacos"
}
}
config {
type = "nacos"
nacos {
serverAddr = "127.0.0.1:8848"
namespace = "e794b575-4231-4935-8271-145c5840d392"
group = "SEATA_GROUP"
username = "nacos"
password = "nacos"
dataId = "seataServer.properties"
}
}
Tables required by the Seata server: https://github.com/seata/seata/blob/develop/script/server/db/mysql.sql
Other related scripts: https://github.com/seata/seata/tree/develop/script
Create the namespace in Nacos and add a new configuration file:
Data ID: seataServer.properties
Group: SEATA_GROUP
Contents of seataServer.properties:
### seata
store.mode=db
store.publicKey=
## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
store.db.datasource=druid
## mysql/oracle/postgresql/h2/oceanbase etc.
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
## if using mysql to store the data, recommend add rewriteBatchedStatements=true in jdbc connection param
store.db.url=jdbc:mysql://192.168.101.128:3309/seata?rewriteBatchedStatements=true&useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&useSSL=false&zeroDateTimeBehavior=convertToNull&serverTimezone=Asia/Shanghai
store.db.user=root
store.db.password=123456
store.db.minConn=5
store.db.maxConn=100
store.db.globalTable = global_table
store.db.branchTable = branch_table
store.db.lockTable =lock_table
store.db.queryLimit = 100
store.db.maxWait = 5000
## transport
# tcp udt unix-domain-socket
transport.type=TCP
#NIO NATIVE
transport.server=NIO
#enable heartbeat
transport.heartbeat=true
transport.serialization=seata
transport.compressor=none
transport.threadFactory.bossThreadPrefix = NettyBoss
transport.threadFactory.workerThreadPrefix = NettyServerNIOWorker
transport.threadFactory.serverExecutorThread-prefix = NettyServerBizHandler
transport.threadFactory.shareBossWorker = false
transport.threadFactory.clientSelectorThreadPrefix = NettyClientSelector
transport.threadFactory.clientSelectorThreadSize = 1
transport.threadFactory.clientWorkerThreadPrefix = NettyClientWorkerThread
transport.threadFactory.bossThreadSize = 1
transport.threadFactory.workerThreadSize = default
# seconds to wait when shutting down the server
transport.shutdown.wait=3
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
Multi-datasource transactions in a monolithic service
Add the dependency to the Spring Boot project:
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-spring-boot-starter</artifactId>
<version>1.4.2</version>
</dependency>
Project configuration file:
seata:
  application-id: test # your application id goes here
  service:
    grouplist:
      # seata-server address
      default: 127.0.0.1:8091
    # transaction group mapping
    vgroup-mapping:
      global_tx_group: default
    enable-degrade: false
    disable-global-transaction: false
  # whether to enable spring-boot auto-configuration
  enabled: true
  # whether to auto-proxy DataSource beans
  enable-auto-data-source-proxy: true
  tx-service-group: global_tx_group
  client:
    tm:
      # retries for reporting the phase-one global commit result to the TC; default 1, a value above 1 is recommended
      commit-retry-count: 3
      # retries for reporting the phase-one global rollback result to the TC; default 1, a value above 1 is recommended
      rollback-retry-count: 3
    rm:
      # whether to report phase-one success; default false since 1.1.0. true keeps the branch-transaction lifecycle record complete, false is noticeably faster
      report-success-enable: true
      # automatically refresh cached table metadata; default false
      table-meta-check-enable: true
      # retries for reporting the phase-one result to the TC
      report-retry-count: 5
      # async commit buffer size, default 10000; after a successful phase-two commit the RM cleans the undo queue asynchronously
      async-commit-buffer-limit: 1000
      lock:
        # retry interval (ms) when checking or acquiring the global lock; default 10
        retry-interval: 10
        # lock strategy when a branch transaction conflicts with a global rollback; default true, release the local lock first so the rollback can succeed
        retry-policy-branch-rollback-on-conflict: true
        # retries when checking or acquiring the global lock
        retry-times: 30
    undo:
      # custom undo table name; default undo_log
      log-table: seata_undo_log
      # phase-two rollback image validation
      data-validation: true
      # undo log serialization format
      log-serialization: jackson
  transport:
    type: TCP
    server: NIO
    heartbeat: true
    # client/server wire serialization: seata(ByteBuf), protobuf, kryo, hessian, fst; default seata
    serialization: seata
    # client/server payload compression: none or gzip; default none
    compressor: none
    thread-factory:
      boss-thread-prefix: NettyBoss
      client-worker-thread-prefix: NettyServerNIOWorker
      server-executor-thread-prefix: NettyServerBizHandler
      client-selector-thread-size: 1
      client-selector-thread-prefix: NettyClientWorkerThread
Simple usage, together with dynamic-datasource-spring-boot-starter:
@Autowired
StaffMapper staffMapper;
@Override
@GlobalTransactional(rollbackFor = Exception.class)
public void globalTx() {
userService.updateMaster();
userService.updateIndependent();
//simulate an exception to trigger a rollback
int i = 1 / 0;
}
@DS("master")
@Transactional(rollbackFor = Exception.class)
public void updateMaster() {
User user1 = baseDao.selectById(1);
user1.setAge(999);
baseDao.updateById(user1);
User user2 = baseDao.selectById(2);
user2.setAge(999);
baseDao.updateById(user2);
}
@DS("independent")
@Transactional(rollbackFor = Exception.class)
public void updateIndependent() {
Staff staff1 = staffMapper.selectById(1);
staff1.setAge(999);
staffMapper.updateById(staff1);
Staff staff2 = staffMapper.selectById(2);
staff2.setAge(999);
staffMapper.updateById(staff2);
}
The undo records can be inspected in seata_undo_log:
SELECT CAST(rollback_info AS char) FROM seata_undo_log
{
"@class": "io.seata.rm.datasource.undo.BranchUndoLog",
"xid": "192.168.101.1:8091:6593516322371825665",
"branchId": 6593516322371825668,
"sqlUndoLogs": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.undo.SQLUndoLog",
"sqlType": "UPDATE",
"tableName": "staff",
"beforeImage": {
"@class": "io.seata.rm.datasource.sql.struct.TableRecords",
"tableName": "staff",
"rows": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Row",
"fields": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "id",
"keyType": "PRIMARY_KEY",
"type": 4,
"value": [
"java.lang.Long",
1
]
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "name",
"keyType": "NULL",
"type": 12,
"value": "1"
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "age",
"keyType": "NULL",
"type": 4,
"value": 2
}
]
]
}
]
]
},
"afterImage": {
"@class": "io.seata.rm.datasource.sql.struct.TableRecords",
"tableName": "staff",
"rows": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Row",
"fields": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "id",
"keyType": "PRIMARY_KEY",
"type": 4,
"value": [
"java.lang.Long",
1
]
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "name",
"keyType": "NULL",
"type": 12,
"value": "1"
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "age",
"keyType": "NULL",
"type": 4,
"value": 999
}
]
]
}
]
]
}
},
{
"@class": "io.seata.rm.datasource.undo.SQLUndoLog",
"sqlType": "UPDATE",
"tableName": "staff",
"beforeImage": {
"@class": "io.seata.rm.datasource.sql.struct.TableRecords",
"tableName": "staff",
"rows": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Row",
"fields": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "id",
"keyType": "PRIMARY_KEY",
"type": 4,
"value": [
"java.lang.Long",
2
]
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "name",
"keyType": "NULL",
"type": 12,
"value": "2"
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "age",
"keyType": "NULL",
"type": 4,
"value": 3
}
]
]
}
]
]
},
"afterImage": {
"@class": "io.seata.rm.datasource.sql.struct.TableRecords",
"tableName": "staff",
"rows": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Row",
"fields": [
"java.util.ArrayList",
[
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "id",
"keyType": "PRIMARY_KEY",
"type": 4,
"value": [
"java.lang.Long",
2
]
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "name",
"keyType": "NULL",
"type": 12,
"value": "2"
},
{
"@class": "io.seata.rm.datasource.sql.struct.Field",
"name": "age",
"keyType": "NULL",
"type": 4,
"value": 999
}
]
]
}
]
]
}
}
]
]
}
As shown, the table stores the records both before and after the data was modified.
Distributed transactions for microservice projects
If this is a microservice project that needs distributed transaction support, configure it as follows. Add the dependency:
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-seata</artifactId>
</dependency>
The configuration is the same as for the monolithic multi-datasource case; seata.application-id can be omitted and defaults to the current application id. Every module needs this configuration, because Seata has to proxy its data source. In practice, version 1.4.2 fails when serializing Date fields with jackson/fastjson (https://github.com/seata/seata/issues/3883); the undo-log serialization can be switched to kryo instead (seata.client.undo.log-serialization: kryo), which requires an extra dependency:
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-serializer-kryo</artifactId>
<version>1.4.2</version>
</dependency>
A minimal demo:
@Autowired
RoleFeignClient roleFeignClient;
@Autowired
StaffFeignClient staffFeignClient;
@GlobalTransactional(rollbackFor = Exception.class)
public RestResult<Boolean> globalTxTest() {
log.info("xid: {}", RootContext.getXID());
roleFeignClient.updateRole();
staffFeignClient.updateUser();
int i = 1 / 0;
return RestResult.ok();
}
@Override
@Transactional(rollbackFor = Exception.class)
public RestResult<Boolean> updateRole() {
log.info("xid: {}", RootContext.getXID());
RolePO role1 = roleDAO.selectById(1);
role1.setName("1111111111");
roleDAO.updateById(role1);
RolePO role2 = roleDAO.selectById(2);
role2.setName("2222222");
roleDAO.updateById(role2);
return RestResult.ok();
}
@Override
@Transactional(rollbackFor = Exception.class)
public RestResult<Boolean> updateUser() {
log.info("xid: {}", RootContext.getXID());
StaffPO staff1 = baseMapper.selectById(1);
staff1.setAge(999);
baseMapper.updateById(staff1);
StaffPO staff2 = baseMapper.selectById(2);
staff2.setAge(999);
baseMapper.updateById(staff2);
return RestResult.ok();
}
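One footnote on XID propagation: the spring-cloud-starter-alibaba-seata used above propagates the XID automatically for Feign and RestTemplate calls, which is why the demo works without extra code. For transports it does not intercept (a message queue, for example), the XID has to be carried and re-bound by hand. Below is a minimal sketch using the RootContext class seen in the examples above; the method and parameter names are illustrative, not from the original.

import io.seata.core.context.RootContext;

// A sketch of manual XID propagation for calls the Seata starter does not intercept.
// The xid value would arrive through whatever transport you use, e.g. a message header.
public class XidPropagationExample {

    public void handleIncomingCall(String xid) {
        if (xid != null) {
            RootContext.bind(xid);     // join the caller's global transaction
        }
        try {
            // ... execute the local @Transactional business logic here ...
        } finally {
            if (xid != null) {
                RootContext.unbind();  // clean up the bound XID afterwards
            }
        }
    }
}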
The Spring Cloud Upgrade Road (2020.0.x) - 26. OpenFeign's Components
Source code for this series: https://github.com/JoJoTec/spring-cloud-parent
First, here is the component structure from the official documentation. The official docs organize the components by feature; here we organize them by source-code implementation (since we will later customize these components, we need to break them down from the source-code angle), so there may be small differences.
The Contract, which parses class metadata
OpenFeign generates an HTTP API automatically by proxying class metadata. Which class metadata is parsed, and which of it is considered valid, is determined by the configured Contract. We can implement a Contract to customize how class metadata is parsed. For example, let's define a custom annotation:
//usable on methods only
@java.lang.annotation.Target(METHOD)
//retain the annotation at runtime
@Retention(RUNTIME)
@interface Get {
//request uri
String uri();
}
This annotation is simple: a method carrying it is automatically wrapped as a GET request whose uri is the return value of uri(). Next we write a custom Contract to handle this annotation. Since MethodMetadata is final and package-private, we can only extend Contract.BaseContract to customize annotation parsing:
//external customizations must extend BaseContract, because the MethodMetadata it creates has a package-private constructor
static class CustomizedContract extends Contract.BaseContract {
@Override
protected void processAnnotationOnClass(MethodMetadata data, Class<?> clz) {
//handle annotations on the class; not used here
}
@Override
protected void processAnnotationOnMethod(MethodMetadata data, Annotation annotation, Method method) {
//handle annotations on the method
Get get = method.getAnnotation(Get.class);
//if the Get annotation is present, set the HTTP method to GET and the uri to the value of uri()
if (get != null) {
data.template().method(Request.HttpMethod.GET);
data.template().uri(get.uri());
}
}
@Override
protected boolean processAnnotationsOnParameter(MethodMetadata data, Annotation[] annotations, int paramIndex) {
//handle annotations on parameters; not used here
return false;
}
}
Then, let's use this Contract:
interface HttpBin {
@Get(uri = "/get")
String get();
}
public static void main(String[] args) {
HttpBin httpBin = Feign.builder()
.contract(new CustomizedContract())
.target(HttpBin.class, "http://www.httpbin.org");
//this actually calls http://www.httpbin.org/get
String s = httpBin.get();
}
Normally we would not use a Contract like this, because business code rarely defines its own annotations; it is a feature for underlying frameworks. For example, in a spring-mvc environment we need to be compatible with the spring-mvc annotations, and the implementation class for that is SpringMvcContract.
The Encoder and the Decoder
The encoder and decoder interfaces are defined as:
public interface Decoder {
Object decode(Response response, Type type) throws IOException, DecodeException, FeignException;
}
public interface Encoder {
void encode(Object object, Type bodyType, RequestTemplate template) throws EncodeException;
}
OpenFeign lets us plug in custom encoders and decoders; here we implement a pair based on FastJson to see how they are used.
/**
 * FastJson-based decoder (deserialization)
*/
static class FastJsonDecoder implements Decoder {
@Override
public Object decode(Response response, Type type) throws IOException, DecodeException, FeignException {
//read the body
byte[] body = response.body().asInputStream().readAllBytes();
return JSON.parseObject(body, type);
}
}
/**
* FastJson-based encoder (serialization)
*/
static class FastJsonEncoder implements Encoder {
@Override
public void encode(Object object, Type bodyType, RequestTemplate template) throws EncodeException {
if (object != null) {
//encode the body
template.header(CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType());
template.body(JSON.toJSONBytes(object), StandardCharsets.UTF_8);
}
}
}
Then we test against http://httpbin.org/anything, which echoes back every element of the request we send.
interface HttpBin {
@RequestLine("POST /anything")
Object postBody(Map<String, String> body);
}
public static void main(String[] args) {
HttpBin httpBin = Feign.builder()
.decoder(new FastJsonDecoder())
.encoder(new FastJsonEncoder())
.target(HttpBin.class, "http://www.httpbin.org");
Object o = httpBin.postBody(Map.of("key", "value"));
}
Looking at the response, we can see that the json body we sent was received correctly.
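In a Spring Cloud OpenFeign application the same pair can be registered as beans instead of via Feign.builder(); below is a minimal sketch, assuming the FastJsonEncoder and FastJsonDecoder classes defined above are visible on the classpath. Spring Cloud OpenFeign picks up Encoder/Decoder beans from the configuration class referenced by @FeignClient(configuration = ...).

import feign.codec.Decoder;
import feign.codec.Encoder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Registering the FastJson encoder/decoder as Feign configuration beans.
@Configuration
public class FastJsonFeignConfiguration {

    @Bean
    public Encoder feignEncoder() {
        return new FastJsonEncoder(); // the encoder defined earlier
    }

    @Bean
    public Decoder feignDecoder() {
        return new FastJsonDecoder(); // the decoder defined earlier
    }
}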
The Spring Cloud Upgrade Road (2020.0.x) - 5. The Parent of All Projects and spring-framework-common
Source code for this series: https://github.com/HashZhang/spring-cloud-scaffold/tree/master/spring-cloud-iiford
Source file: https://github.com/HashZhang/spring-cloud-scaffold/blob/master/spring-cloud-iiford/pom.xml
1. Dependencies for log4j2 async logging: exclude the default logging implementation logback, add the log4j2 dependency, and add the disruptor dependency that log4j2 async logging requires.
<!--logging uses log4j2-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
<exclusions>
<exclusion>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-logging</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-log4j2</artifactId>
</dependency>
<!--log4j2异步日志需要的依赖,所有项目都必须用log4j2和异步日志配置-->
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>${disruptor.version}</version>
</dependency>
2. javax.xml related dependencies. Our project uses JDK 11; the module system introduced in JDK 9 means javax.xml is no longer loaded automatically, so the following modules are needed:
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.jaxb</groupId>
<artifactId>jaxb-runtime</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-xjc</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>${activation.version}</version>
</dependency>
3. Unit testing with JUnit 5; for usage see the JUnit 5 user guide.
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
4. Spring Boot test support; see the reference documentation section features.testing.
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
5. Mockito extension, mainly to mock final classes: the Spring Boot test starter already includes mockito, but we also need to mock final classes, so the following dependency is added:
<!--mockito extension, mainly for mocking final classes-->
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-inline</artifactId>
<version>${mokito.version}</version>
<scope>test</scope>
</dependency>
6. embedded-redis, for unit tests that involve Redis: if a unit test needs to access redis, a redis instance must be started before the test and shut down afterwards; embedded-redis does exactly that. This dependency is already added in spring-cloud-parent, so it can be used directly. Reference: embedded-redis
<dependency>
<groupId>com.github.kstyrc</groupId>
<artifactId>embedded-redis</artifactId>
<version>${embedded-redis.version}</version>
<scope>test</scope>
</dependency>
7. sqlite dependency for unit tests: for database unit tests we can use SQLite. Reference: sqlite-jdbc
<dependency>
<groupId>org.xerial</groupId>
<artifactId>sqlite-jdbc</artifactId>
<version>${sqlite-jdbc.version}</version>
<scope>test</scope>
</dependency>
8. Set the compiler level to Java 11
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>11</source>
<!--ingore javac compiler assert error-->
<forceJavacCompilerUse>true</forceJavacCompilerUse>
<target>11</target>
</configuration>
</plugin>
</plugins>
</build>
Source file: https://github.com/HashZhang/spring-cloud-scaffold/blob/master/spring-cloud-iiford/spring-cloud-iiford-spring-framework-common/pom.xml
spring-framework-common is the common dependency project for code that uses the Spring and Spring Boot frameworks; it is a dependencies-only project.
1. caffeine as the unified internal cache framework: caffeine is a very efficient local cache whose API is identical to Guava Cache, so upgrading is easy. Performance-wise, the caffeine source tree contains benchmarks against Guava Cache, ConcurrentHashMap, ElasticSearchMap, Collision, Ehcache and other implementations, based on the Yahoo trace library that simulates near-real user scenarios; caffeine also draws on many papers to implement caches suited to different scenarios, for example:
Adaptive Replacement Cache: http://www.cs.cmu.edu/~15-440/READINGS/megiddo-computer2004.pdf
Quadruply-segmented LRU: http://www.cs.cornell.edu/~qhuang/papers/sosp_fbanalysis.pdf
2 Queue: http://www.tedunangst.com/flak/post/2Q-buffer-cache-algorithm
Segmented LRU: http://www.is.kyusan-u.ac.jp/~chengk/pub/papers/compsac00_A07-07.pdf
Filtering-based Buffer Cache: http://storageconference.us/2017/Papers/FilteringBasedBufferCacheAlgorithm.pdf
So we choose caffeine as our local cache framework. Reference: caffeine
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
2. Google's Java library guava: although we do not use guava for local caching, it has many other utilities that we use all the time. Reference: guava docs
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
3. fastjson as the unified internal serialization: note that json libraries generally need to be warmed up; how to do that is covered later. Reference: fastjson
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>${fastjson.version}</version>
</dependency>
4. lombok to simplify code. Reference: projectlombok
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
5. Call-path tracing - sleuth. Reference: spring-cloud-sleuth
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-sleuth</artifactId>
</dependency>
6. Cross-thread ThreadLocal. Reference: transmittable-thread-local
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>transmittable-thread-local</artifactId>
<version>${transmittable-thread-local.version}</version>
</dependency>
7. Swagger related. Reference: swagger
<!--Swagger-->
<!-- swagger java元数据集成 -->
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${swagger.version}</version>
</dependency>
<!-- swagger 前端页面 -->
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
8. Apache Commons utilities. We use a few Commons libraries to simplify code: commons-lang, commons-collections, commons-text.
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-collections4 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons-collections4.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-text -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-text</artifactId>
<version>${commons-text.version}</version>
</dependency>
In this section we walked through the parent of all our projects in detail, as well as the design of spring-framework-common, the utility dependency for code that uses Spring and Spring Boot features. In the next section we will analyze the dependencies that provide the microservice features.
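To make the caffeine choice in item 1 above concrete, here is a minimal usage sketch; the cache size and expiry values are arbitrary illustrations, not from the original.

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.time.Duration;

// A minimal sketch of the caffeine API.
public class CaffeineExample {

    public static void main(String[] args) {
        Cache<String, String> cache = Caffeine.newBuilder()
                .maximumSize(10_000)                     // bound the cache size
                .expireAfterWrite(Duration.ofMinutes(5)) // evict entries 5 minutes after write
                .build();

        // compute-if-absent style lookup
        String value = cache.get("some-key", key -> "loaded-" + key);
        System.out.println(value);
    }
}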
The Spring Cloud Upgrade Road - 2020.0.x - 1. Background, Requirements, and Common Dependencies (Part 2)
1.2.2. The common base dependency package
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>spring-cloud-iiford</artifactId>
<groupId>com.github.hashjang</groupId>
<version>1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-iiford-common</artifactId>
<properties>
<guava.version>30.1.1-jre</guava.version>
<fastjson.version>1.2.75</fastjson.version>
<disruptor.version>3.4.2</disruptor.version>
<jaxb.version>2.3.1</jaxb.version>
<activation.version>1.1.1</activation.version>
</properties>
<dependencies>
<!--内部缓存框架统一采用caffeine-->
<!--这样Spring cloud loadbalancer用的本地实例缓存也是基于Caffeine-->
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
<!-- guava 工具包 -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<!--内部序列化统一采用fastjson-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>${fastjson.version}</version>
</dependency>
<!--日志需要用log4j2-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
<exclusions>
<exclusion>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-logging</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-log4j2</artifactId>
</dependency>
<!--lombok简化代码-->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
<!--log4j2异步日志需要的依赖,所有项目都必须用log4j2和异步日志配置-->
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>${disruptor.version}</version>
</dependency>
<!--JDK 9之后的模块化特性导致javax.xml不自动加载,所以需要如下模块-->
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.jaxb</groupId>
<artifactId>jaxb-runtime</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-xjc</artifactId>
<version>${jaxb.version}</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>${activation.version}</version>
</dependency>
</dependencies>
</project>
1. Cache framework: caffeine
A very efficient local cache framework whose API is identical to Guava Cache, so upgrading is easy. Performance-wise, the caffeine source tree contains benchmarks against Guava Cache, ConcurrentHashMap, ElasticSearchMap, Collision, Ehcache and other implementations, based on the Yahoo trace library that simulates near-real user scenarios; caffeine also draws on many papers to implement caches suited to different scenarios, for example:
Adaptive Replacement Cache: http://www.cs.cmu.edu/~15-440/READINGS/megiddo-computer2004.pdf
Quadruply-segmented LRU: http://www.cs.cornell.edu/~qhuang/papers/sosp_fbanalysis.pdf
2 Queue: http://www.tedunangst.com/flak/post/2Q-buffer-cache-algorithm
Segmented LRU: http://www.is.kyusan-u.ac.jp/~chengk/pub/papers/compsac00_A07-07.pdf
Filtering-based Buffer Cache: http://storageconference.us/2017/Papers/FilteringBasedBufferCacheAlgorithm.pdf
So we choose caffeine as our local cache framework. Reference: https://github.com/ben-manes/caffeine
2. guava
guava is Google's Java library; although we do not use guava for local caching, it has many other utilities we use all the time. Reference: https://guava.dev/releases/snapshot-jre/api/docs/
3. Internal serialization moving from fastjson to jackson
json libraries generally need to be warmed up; how to do that is covered later. Some of our internal serialization still uses fastjson, but fastjson has not been updated for a long time and has accumulated many issues. To avoid future problems (vulnerabilities or performance issues) adding risk in production, this version keeps compatibility with it, and the next version will remove fastjson entirely; how to do that is explained in detail later.
4. log4j2 for logging
Chosen mainly for its async logging, so that printing large volumes of business logs does not become a performance bottleneck. Even so, printing location information such as code line numbers in production is not recommended; the reason and the workaround are covered later. Because the log4j2 async logging feature depends on disruptor, the disruptor dependency must be added as well. References: https://logging.apache.org/log4j/2.x/ and https://lmax-exchange.github.io/disruptor/
5. Dependencies needed for JDK 9+ compatibility
The module system introduced in JDK 9 means javax.xml is no longer loaded automatically, yet many of the project's dependencies need that module, so these dependencies are added manually.
1.2.3. Common dependencies for Servlet microservices
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>spring-cloud-iiford</artifactId>
<groupId>com.github.hashjang</groupId>
<version>1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-iiford-service-common</artifactId>
<dependencies>
<dependency>
<groupId>com.github.hashjang</groupId>
<artifactId>spring-cloud-iiford-common</artifactId>
<version>${project.version}</version>
</dependency>
<!--注册到eureka-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
</dependency>
<!--不用Ribbon,用Spring Cloud LoadBalancer-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-loadbalancer</artifactId>
</dependency>
<!--微服务间调用主要靠 openfeign 封装 API-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-openfeign</artifactId>
</dependency>
<!--resilience4j 作为重试,断路,限并发,限流的组件基础-->
<dependency>
<groupId>io.github.resilience4j</groupId>
<artifactId>resilience4j-spring-cloud2</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/io.github.resilience4j/resilience4j-feign -->
<dependency>
<groupId>io.github.resilience4j</groupId>
<artifactId>resilience4j-feign</artifactId>
</dependency>
<!--actuator接口-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<!--调用路径记录-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-sleuth</artifactId>
</dependency>
<!--暴露actuator相关端口-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<!--暴露http接口, servlet框架采用nio的undertow,注意直接内存使用,减少GC-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<exclusions>
<exclusion>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-tomcat</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-undertow</artifactId>
</dependency>
</dependencies>
</project>
The dependencies above will be used later in the series.
1.2.4. Dependencies for Webflux microservices
For Webflux reactive-style microservices, simply replace spring-boot-starter-web with spring-boot-starter-webflux.
Spring Boot 2.0 + FastJson 1.2.+ as the JSON Serializer
When configuring FastJson in Spring Boot, the following error is thrown:
java.lang.IllegalArgumentException: Content-Type cannot contain wildcard type '*'
at org.springframework.util.Assert.isTrue(Assert.java:116) ~[spring-core-5.0.13.RELEASE.jar!/:5.0.13.RELEASE]
at org.springframework.http.HttpHeaders.setContentType(HttpHeaders.java:861) ~[spring-web-5.0.13.RELEASE.jar!/:5.0.13.RELEASE]
at org.springframework.http.converter.AbstractHttpMessageConverter.addDefaultHeaders(AbstractHttpMessageConverter.java:255) ~[spring-web-5.0.13.RELEASE.jar!/:5.0.13.RELEASE]
at org.springframework.http.converter.AbstractHttpMessageConverter.write(AbstractHttpMessageConverter.java:210) ~[spring-web-5.0.13.RELEASE.jar!/:5.0.13.RELEASE]
at com.alibaba.fastjson.support.spring.FastJsonHttpMessageConverter.write(FastJsonHttpMessageConverter.java:244) ~[fastjson-1.2.57.jar!/:?]
Looking at the FastJson converter's initialization:
public static final MediaType ALL = valueOf("*/*");
public FastJsonHttpMessageConverter() {
super(MediaType.ALL); // */*
}
So a wildcard content type is not allowed, and the converter needs to be configured as below:
@Bean
public HttpMessageConverters fastJsonHttpMessageConverters() {
//create the FastJson message converter
FastJsonHttpMessageConverter fastJsonHttpMessageConverter =
new FastJsonHttpMessageConverter();
List<MediaType> supportedMediaTypes = Lists.newArrayList();
//required after upgrading from 1.1.41 to 1.2.x, otherwise an error is thrown
supportedMediaTypes.add(MediaType.APPLICATION_JSON);
supportedMediaTypes.add(MediaType.APPLICATION_JSON_UTF8);
fastJsonHttpMessageConverter.setSupportedMediaTypes(supportedMediaTypes);
//create the FastJson config and set the serialization rules
FastJsonConfig fastJsonConfig = new FastJsonConfig();
//add a custom valueFilter here if needed
//apply the config to the converter
fastJsonHttpMessageConverter.setFastJsonConfig(fastJsonConfig);
StringHttpMessageConverter stringHttpMessageConverter =
new StringHttpMessageConverter(Charset.defaultCharset());
fastJsonConfig.setSerializerFeatures(
//disable circular-reference detection for the same object, default false (without it serialization may loop forever)
SerializerFeature.DisableCircularReferenceDetect,
//whether to output fields whose value is null, default false
SerializerFeature.WriteMapNullValue
);
return new HttpMessageConverters(fastJsonHttpMessageConverter, stringHttpMessageConverter);
}
Hands-on: Listening for Eureka Client Cache Updates
Welcome to my GitHub, where all of Xinchen's original articles (with companion source code) are categorized and indexed: https://github.com/zq2599/blog_demos
Fetching the service registry from the Eureka server
An application in a Spring Cloud environment, once registered with the Eureka server, fetches the registration information of all applications (also called the service registry) from the Eureka server and stores it locally. This happens periodically, every thirty seconds by default. The official architecture diagram shows the Application Service issuing a Get Registry request to the Eureka Server.
Reference articles
If you want to dig deeper into Spring's broadcast mechanism or into how the Eureka client refreshes its service registry, the following two articles are recommended: "spring 4.1.8 extension in practice, part 3: broadcasting and listening" and "Spring Cloud source code analysis, Eureka chapter 5: refreshing the service registry".
What we will do
This is a hands-on article. The steps are:
start the Eureka server;
build an application, springcloudcustomizelistener, which registers with the Eureka server on startup; as a Eureka client it periodically fetches the service registry from the Eureka server (existing logic);
every successfully fetched registry is stored in a local cache (existing logic);
after it is cached, a broadcast is published inside the Spring container (existing logic);
the focus of this exercise is a custom listener that receives that broadcast and logs its details;
then start another application, springclouddeepprovider, which also registers with the Eureka server;
check the listener log of springcloudcustomizelistener again, and the registration information of springclouddeepprovider will appear there.
How the Eureka client caches the service registry (source overview)
Before the hands-on part, a short overview of the source code that caches the service registry on the Eureka client, so we can write a matching listener. When the application starts as a Eureka client, the initScheduledTasks method of com.netflix.discovery.DiscoveryClient starts a periodic task that fetches the service registry from the Eureka server every 30 seconds; TimedSupervisorTask drives the periodic execution and CacheRefreshThread performs the actual refresh. Inside CacheRefreshThread, after several layers of calls, the logic that fetches the registry and updates the local cache is implemented in fetchRegistry: getAndStoreFullRegistry performs a full refresh, getAndUpdateDelta performs an incremental refresh, and onCacheRefreshed, today's focus, publishes a broadcast saying that the local cache of the service registry has been updated.
onCacheRefreshed is overridden in the subclass CloudEurekaClient, where a plain Spring in-container event of type HeartbeatEvent is published. We can write a custom listener class to receive this broadcast, restricting it to HeartbeatEvent via generics:
@Override
protected void onCacheRefreshed() {
if (this.cacheRefreshedCount != null) { //might be called during construction and will be null
long newCount = this.cacheRefreshedCount.incrementAndGet();
log.trace("onCacheRefreshed called with count: " + newCount);
//Spring in-container broadcast; the HeartbeatEvent is constructed with two arguments: the CloudEurekaClient instance and the cache-refresh count
this.publisher.publishEvent(new HeartbeatEvent(this, newCount));
}
}
The applications in this exercise
This exercise sets up a small Spring Cloud environment with the following applications (name - role - purpose - port - start order):
springclouddeepeureka - Eureka server - the registry - 8081 - started first
springcloudcustomizelistener - Eureka client - carries the custom broadcast listener that listens for service-registry cache updates - 8085 - started second
springclouddeepprovider - Eureka client - an ordinary web application - 8082 - started third
Downloading the source
The applications springclouddeepeureka and springclouddeepprovider were described in detail in "Spring Cloud source code analysis, Eureka chapter 1: preparation", so they are not covered again here; you can refer to that article, or download their source from GitHub (name - link - note):
Project home page - https://github.com/zq2599/blog_demos - the project's home page on GitHub
git repository (https) - https://github.com/zq2599/blog_demos.git - the source repository, https protocol
git repository (ssh) -
git@github.com:zq2599/blog_demos.git - the source repository, ssh protocol
This git project contains several folders; the source for this chapter is in the springclouddeepeureka and springclouddeepprovider folders.
Starting the springclouddeepeureka application
springclouddeepeureka is started as soon as it is built; visiting http://localhost:8081 in a browser shows the Eureka server is up, with no applications registered yet.
Building springcloudcustomizelistener
Next we build springcloudcustomizelistener, the application that carries the custom Spring broadcast listener. If you would rather not type the code, it can also be downloaded from GitHub (name - link - note):
Project home page - https://github.com/zq2599/blog_demos - the project's home page on GitHub
git repository (https) - https://github.com/zq2599/blog_demos.git - the source repository, https protocol
git repository (ssh) -
git@github.com:zq2599/blog_demos.git - the source repository, ssh protocol
This git project contains several folders; the source for this chapter is in the springcloudcustomizelistener folder.
Let's build it. Create a Spring Boot web application with the pom.xml below; note that the fastjson library is included so that richer detail can be shown in the log:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.bolingcavalry</groupId>
<artifactId>springcloudcustomizelistener</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>jar</packaging>
<name>springcloudcustomizelistener</name>
<description>Demo project for Spring Boot</description>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>1.5.9.RELEASE</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.28</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-dependencies</artifactId>
<version>Edgware.RELEASE</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
The application configuration file application.yml is as follows:
server:
  port: 8085
spring:
  application:
    name: springcloud-customize-listener
eureka:
  client:
    serviceUrl:
      defaultZone: http://localhost:8081/eureka/
  instance:
    prefer-ip-address: true
Create the listener EurekaCacheRefreshListener.java. As analyzed earlier, a HeartbeatEvent carries two values, the cache-refresh count and the CloudEurekaClient instance; when the broadcast is received, both are written to the log:
@Component
public class EurekaCacheRefreshListener implements ApplicationListener<HeartbeatEvent> {
private static final Logger logger = LoggerFactory.getLogger(EurekaCacheRefreshListener.class);
@Override
public void onApplicationEvent(HeartbeatEvent event) {
Object count = event.getValue();
Object source = event.getSource();
logger.info("start onApplicationEvent, count [{}], source :\n{}", count, JSON.toJSON(source));
}
}
Start the application and wait about 30 seconds; the log from EurekaCacheRefreshListener appears in the console. The content is very rich, so it is worth copying it into an online JSON formatter before reading. Some of the key information is listed below, with the rest omitted:
{
"instanceRemoteStatus": "UP",
"lastSuccessfulHeartbeatTimePeriod": 316,
"allKnownRegions": [
"us-east-1"
],
...
part of the content is omitted here
...
"applications": {
"appsHashCode": "UP_1_",
"registeredApplications": [
{
"instances": [
{
"hostName": "192.168.31.104",
"overriddenStatus": "UNKNOWN",
"metadata": {
"jmx.port": "64656",
"management.port": "8085"
},
"statusPageUrl": "http://192.168.31.104:8085/info",
"secureVipAddress": "springcloud-customize-listener",
"leaseInfo": {
"renewalIntervalInSecs": 30,
"registrationTimestamp": 1537827838033,
"evictionTimestamp": 0,
"renewalTimestamp": 1537827987419,
"durationInSecs": 90,
"serviceUpTimestamp": 1537827837461
},
"homePageUrl": "http://192.168.31.104:8085/",
"countryId": 1,
"sID": "na",
"securePort": 443,
"dataCenterInfo": {
"name": "MyOwn"
},
"instanceId": "DESKTOP-82CCEBN:springcloud-customize-listener:8085",
"coordinatingDiscoveryServer": false,
"id": "DESKTOP-82CCEBN:springcloud-customize-listener:8085",
"vIPAddress": "springcloud-customize-listener",
"dirty": false,
"lastUpdatedTimestamp": 1537827838033,
"healthCheckUrl": "http://192.168.31.104:8085/health",
"appName": "SPRINGCLOUD-CUSTOMIZE-LISTENER",
"lastDirtyTimestamp": 1537827837401,
"iPAddr": "192.168.31.104",
"version": "unknown",
"actionType": "ADDED",
"port": 8085,
"healthCheckUrls": [
"http://192.168.31.104:8085/health"
],
"status": "UP"
}
]
As shown above, the instance information of the current application is already cached locally.
Starting the springclouddeepprovider application
springclouddeepprovider is the last application to start. After starting it, check the log of springcloudcustomizelistener again; the registration information of springclouddeepprovider has now been fetched:
{
"instances": [
{
"hostName": "192.168.119.1",
"overriddenStatus": "UNKNOWN",
"metadata": {
"jmx.port": "58420",
"management.port": "8082"
},
"statusPageUrl": "http://192.168.119.1:8082/info",
"secureVipAddress": "springcloud-deep-provider",
"leaseInfo": {
"renewalIntervalInSecs": 30,
"registrationTimestamp": 1537840715486,
"evictionTimestamp": 0,
"renewalTimestamp": 1537840715486,
"durationInSecs": 90,
"serviceUpTimestamp": 1537840715486
},
"homePageUrl": "http://192.168.119.1:8082/",
"countryId": 1,
"sID": "na",
"securePort": 443,
"dataCenterInfo": {
"name": "MyOwn"
},
"instanceId": "localhost:springcloud-deep-provider:8082",
"coordinatingDiscoveryServer": false,
"id": "localhost:springcloud-deep-provider:8082",
"vIPAddress": "springcloud-deep-provider",
"dirty": false,
"lastUpdatedTimestamp": 1537840715486,
"healthCheckUrl": "http://192.168.119.1:8082/health",
"appName": "SPRINGCLOUD-DEEP-PROVIDER",
"lastDirtyTimestamp": 1537840715451,
"iPAddr": "192.168.119.1",
"version": "unknown",
"actionType": "ADDED",
"port": 8082,
"healthCheckUrls": [
"http://192.168.119.1:8082/health"
],
"status": "UP"
}
],
"name": "SPRINGCLOUD-DEEP-PROVIDER",
"instancesAsIsFromEureka": [
{
"hostName": "192.168.119.1",
"overriddenStatus": "UNKNOWN",
"metadata": {
"jmx.port": "58420",
"management.port": "8082"
},
"statusPageUrl": "http://192.168.119.1:8082/info",
"secureVipAddress": "springcloud-deep-provider",
"leaseInfo": {
"renewalIntervalInSecs": 30,
"registrationTimestamp": 1537840715486,
"evictionTimestamp": 0,
"renewalTimestamp": 1537840715486,
"durationInSecs": 90,
"serviceUpTimestamp": 1537840715486
},
"homePageUrl": "http://192.168.119.1:8082/",
"countryId": 1,
"sID": "na",
"securePort": 443,
"dataCenterInfo": {
"name": "MyOwn"
},
"instanceId": "localhost:springcloud-deep-provider:8082",
"coordinatingDiscoveryServer": false,
"id": "localhost:springcloud-deep-provider:8082",
"vIPAddress": "springcloud-deep-provider",
"dirty": false,
"lastUpdatedTimestamp": 1537840715486,
"healthCheckUrl": "http://192.168.119.1:8082/health",
"appName": "SPRINGCLOUD-DEEP-PROVIDER",
"lastDirtyTimestamp": 1537840715451,
"iPAddr": "192.168.119.1",
"version": "unknown",
"actionType": "ADDED",
"port": 8082,
"healthCheckUrls": [
"http://192.168.119.1:8082/health"
],
"status": "UP"
}
]
}
That completes this exercise. By building a custom broadcast listener we now understand Eureka's registration and discovery mechanism a little better; hopefully this article helps you along in your Spring Cloud studies. Welcome to follow the Alibaba Cloud developer community blog: programmer Xinchen.
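As a small extension of the listener above, the same HeartbeatEvent broadcast can also be received with an annotation-driven listener; below is a minimal sketch under that assumption, with an illustrative class name.

import com.alibaba.fastjson.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cloud.client.discovery.event.HeartbeatEvent;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

// An @EventListener variant of the listener: a method typed to HeartbeatEvent
// receives the same broadcast as the ApplicationListener implementation above.
@Component
public class EurekaCacheRefreshAnnotationListener {

    private static final Logger logger = LoggerFactory.getLogger(EurekaCacheRefreshAnnotationListener.class);

    @EventListener
    public void onHeartbeat(HeartbeatEvent event) {
        logger.info("cache refreshed, count [{}], source:\n{}", event.getValue(), JSON.toJSON(event.getSource()));
    }
}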
Getting Started with ElasticSearch 7 (5): Integrating ElasticSearch 7.5.2 with Spring Boot 2.3.0 via the High Level Client
Background
Today we integrate ElasticSearch with Spring Boot. ElasticSearch officially provides two Java REST clients; the High Level Client is recommended, and it is itself built on top of the Low Level REST Client.
The Java REST Client comes in 2 flavors:
Java Low Level REST Client: the official low-level client for Elasticsearch. It allows to communicate with an Elasticsearch cluster through http. Leaves requests marshalling and responses un-marshalling to users. It is compatible with all Elasticsearch versions.
Java High Level REST Client: the official high-level client for Elasticsearch. Based on the low-level client, it exposes API specific methods and takes care of requests marshalling and responses un-marshalling.
Core dependencies
<!--ES-->
<!--The High Level Java REST Client depends on the following artifacts and their transitive dependencies:
org.elasticsearch.client:elasticsearch-rest-client
org.elasticsearch:elasticsearch
-->
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-high-level-client</artifactId>
<version>7.5.2</version>
</dependency>
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-client</artifactId>
<version>7.5.2</version>
</dependency>
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<version>7.5.2</version>
</dependency>
Core methods
package com.heartsuit.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.springframework.stereotype.Component;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* @Author Heartsuit
* @Date 2020-06-04
*/
@Slf4j
@Component
public class ElasticSearchServiceImpl implements ElasticSearchService {
private final RestHighLevelClient client;
public ElasticSearchServiceImpl(RestHighLevelClient client) {
this.client = client;
}
/**
* 创建索引
*
* @param index 必须小写,格式参考文档,否则报错:Elasticsearch exception [type=invalid_index_name_exception, reason=Invalid index name [OK], must be lowercase]
* @return 是否创建成功
*/
@Override
public boolean createIndex(String index) {
CreateIndexResponse response;
try {
if (!this.existsIndex(index)) {
response = client.indices().create(new CreateIndexRequest(index), RequestOptions.DEFAULT);
} else {
return true;//索引已存在
}
} catch (Exception e) {
log.error("ElasticSearch 创建索引异常:{}", e.getMessage());
return false;
}
return response.isAcknowledged();
}
/**
* 判断索引是否存在
*
* @param index
* @return
*/
private boolean existsIndex(String index) throws IOException {
return client.indices().exists(new GetIndexRequest(index), RequestOptions.DEFAULT);
}
/**
* 删除索引
*
* @param index 必须小写,格式参考文档,否则:找不到大写索引名
* @return 是否删除成功
*/
@Override
public boolean deleteIndex(String index) {
AcknowledgedResponse response = null;
try {
if (this.existsIndex(index)) {
response = client.indices().delete(new DeleteIndexRequest(index), RequestOptions.DEFAULT);
} else {
return true;//索引不存在
}
} catch (Exception e) {
log.error("ElasticSearch 删除索引异常:{}", e.getMessage());
return false;
}
return response.isAcknowledged();
}
/**
* 创建文档
* id相同则更新、不同则创建,数据格式(字段)不同则空,字段为追加模式
*
* @param index 索引
* @param data 数据
* @param dataType 格式类型 例:XContentType.JSON
* @param id 唯一标识 put /index/1
* @return
*/
@Override
public int insertDocument(String index, Object data, XContentType dataType, String id) {
IndexRequest request = new IndexRequest(index);
request.id(id);
String dataString = JSONObject.toJSONString(data);
request.source(dataString, dataType);
IndexResponse response = null;
try {
response = client.index(request, RequestOptions.DEFAULT);
} catch (Exception e) {
log.error("ElasticSearch 创建文档异常:{}", e.getMessage());
}
return response != null ? response.status().getStatus() : 400;
}
/**
* 获取文档
*
* @param index
* @param id
* @param mappingClass
* @param <T>
* @return
*/
@Override
public <T> T getDocument(String index, String id, Class<T> mappingClass) {
GetResponse getResponse = null;
try {
if (this.existsIndex(index)) {
GetRequest getRequest = new GetRequest(index, id);
getResponse = client.get(getRequest, RequestOptions.DEFAULT);
String sourceAsString = getResponse.getSourceAsString();
if (sourceAsString == null || sourceAsString.isEmpty()) {
return null;
}
/**Jackson日期时间序列化问题:
* Cannot construct instance of `java.time.LocalDateTime` (no Creators, like default constructor, exist): no String-argument constructor/factory method to deserialize from String value ('2020-06-04 15:07:54')
*/
// ObjectMapper objectMapper = new ObjectMapper();
// objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
// objectMapper.registerModule(new JavaTimeModule());
// T result = objectMapper.readValue(sourceAsString, mappingClass);
T result = JSON.parseObject(sourceAsString, mappingClass);
return result;
}
} catch (Exception e) {
log.error("ElasticSearch 获取文档异常:{}", e.getMessage());
}
return null;
}
/**
* 更新文档信息
*
* @param index
* @param data
* @param dataType
* @param id
* @return
*/
@Override
public int updateDocument(String index, Object data, XContentType dataType, String id) {
UpdateResponse updateResponse = null;
try {
if (this.existsIndex(index)) {
UpdateRequest updateRequest = new UpdateRequest(index, id);
String dataString = JSONObject.toJSONString(data);
updateRequest.doc(dataString, dataType);
updateResponse = client.update(updateRequest, RequestOptions.DEFAULT);
}
} catch (Exception e) {
log.error("ElasticSearch 更新文档异常:{}", e.getMessage());
}
return updateResponse != null ? updateResponse.status().getStatus() : 400;
}
/**
* 删除文档
*
* @param index
* @param id
* @return
*/
@Override
public int deleteDocument(String index, String id) {
DeleteResponse deleteResponse = null;
try {
if (this.existsIndex(index)) {
DeleteRequest deleteRequest = new DeleteRequest(index, id);
deleteResponse = client.delete(deleteRequest, RequestOptions.DEFAULT);
}
} catch (Exception e) {
log.error("ElasticSearch 删除文档异常:{}", e.getMessage());
}
return deleteResponse != null ? deleteResponse.status().getStatus() : 400;
}
/**
* 批量操作文档信息
* 备注:暂局限入参list,可扩展其他<?>
*
* @param index
* @param list 标识相同则覆盖,否则新增
* @param dataType
* @return
*/
@Override
public boolean batchInsertDocument(String index, List<?> list, XContentType dataType) {
BulkRequest bulkRequest = new BulkRequest();
for (Object obj : list) {
// 自动生成id
bulkRequest.add(new IndexRequest(index).source(JSON.toJSONString(obj), dataType));
}
BulkResponse bulk = null;
try {
bulk = client.bulk(bulkRequest, RequestOptions.DEFAULT);
} catch (Exception e) {
log.error("ElasticSearch批量操作文档信息异常:{}", e.getMessage());
}
return bulk != null && !bulk.hasFailures();
}
/**
* 查询数据
* 备注:可拓展深入精准查询、范围查询、模糊查询、匹配所有等
*
* @param index
* @return
*/
@Override
public List<Map<String, Object>> searchDocument(String index) {
SearchRequest searchRequest = new SearchRequest(index);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
MatchAllQueryBuilder termQueryBuilder = QueryBuilders.matchAllQuery();
sourceBuilder.query(termQueryBuilder);
// sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
searchRequest.source(sourceBuilder);
SearchResponse search;
try {
search = client.search(searchRequest, RequestOptions.DEFAULT);
} catch (Exception e) {
log.error("ElasticSearch 查询数据异常:{}", e.getMessage());
return null;
}
SearchHit[] hits = search.getHits().getHits();
List<Map<String, Object>> mapList = new ArrayList<>();
for (SearchHit hit : hits) {
mapList.add(hit.getSourceAsMap());
}
return mapList;
}
private void close(RestHighLevelClient client) {
try {
client.close();
} catch (IOException e) {
log.error("ElasticSearch 关闭异常:{}", e.getMessage());
}
}
}
Note: FastJson is used here in place of Spring Boot's default JSON parser, Jackson.
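The service above injects a RestHighLevelClient, whose bean definition is not shown in the article; a minimal sketch could look like the following, where the host and port are assumptions to be adjusted to your cluster.

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Provides the RestHighLevelClient injected by ElasticSearchServiceImpl.
@Configuration
public class ElasticSearchClientConfig {

    @Bean(destroyMethod = "close")
    public RestHighLevelClient restHighLevelClient() {
        // host and port are placeholder values
        return new RestHighLevelClient(
                RestClient.builder(new HttpHost("127.0.0.1", 9200, "http")));
    }
}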
Spring Boot: a Detailed, Complete Guide to Passing and Receiving Endpoint Parameters
This article gives plenty of examples for every way of passing and receiving parameters. Without further ado, let's get started.
@PathVariable
Option 1
@GetMapping("/getId/{id}")
public String pathVariableTest(@PathVariable Integer id) {
return "id: "+id;
Option 2
@GetMapping("/getId/{id}")
public String pathVariableTest(@PathVariable("id") Integer id) {
return "id: "+id;
Option 3
@GetMapping("/getId/{idValue}")
public String pathVariableTest(@PathVariable("idValue") Integer id) {
return "id: "+id;
}
All three variants above work. Even so, the second one is recommended, i.e. explicitly naming the corresponding parameter. The first variant, which does not name the parameter, should only be used when there is a single parameter. Finally, a multi-parameter example using the recommended second variant:
@GetMapping("/getIdAny/{id}/{name}")
public String pathVariableTestAny(@PathVariable("id") Integer id,@PathVariable("name") String name) {
return "**id: "+id+" **name: "+name;
}
Result of calling http://localhost:8023/getId/1001/JCccc:
@RequestParam
Option 1
@GetMapping("/getId")
public String requestParamTest( Integer id) {
return "id: "+id;
Option 2
@GetMapping("/getId")
public String requestParamTest( @RequestParam Integer id) {
return "id: "+id;
Option 3
@GetMapping("/getId")
public String requestParamTest( @RequestParam("id") Integer id) {
return "id: "+id;
}
All three variants above can be called normally.
Option 4
@GetMapping("/getId")
public String requestParamTest( @RequestParam("idValue") Integer id) {
return "id: "+id;
}
In the fourth variant, the parameter id is renamed to idValue inside @RequestParam, so the request parameter must also be named idValue. Of the four variants above I recommend the third, i.e. annotating and naming the corresponding parameter explicitly. Finally, a multi-parameter example using the recommended third variant:
@GetMapping("/getIdAny")
public String requestParamTestAny( @RequestParam("id") Integer id,@RequestParam("name") String name,@RequestParam Integer age) {
return "**id: "+id+" **name: "+name+" **age: "+age;
}
Result of calling http://localhost:8023/getIdAny?id=1002&name=JCccc&age=10:
File upload
As a supplement to @RequestParam, here is what to note when uploading files, and a simple example of receiving an uploaded file (received as a MultipartFile):
@ResponseBody
@PostMapping("upload")
public String upload(@RequestParam("myFile") MultipartFile file) {
return "---file name:" + file.getOriginalFilename()+
"---file type:" + file.getContentType()+
"---file size:" + file.getSize();
}
@RequestBody
PS: there are quite a few ways to use @RequestBody, so bear with me. The parameters are sent as json, and there are several ways to receive them (remember the Content-Type must be application/json).
Receiving into an entity class: User.java
PS: both GET and POST requests can carry a request body, but POST is far more common (the later examples all use POST).
@GetMapping("/getRequestBodyValue")
public String RequestBodyTest(@RequestBody User user) {
return user.toString();
}
Result:
Receiving into a Map: individual values can then be fetched from the map with get(key).
@PostMapping("/getRequestBodyValue")
public String RequestBodyTest(@RequestBody Map userMap) {
return userMap.toString();
}
Result:
Receiving into a JSONObject: this uses the fastjson dependency:
<!--add the fastjson dependency-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.7</version>
</dependency>
@PostMapping("/getRequestBodyValue")
public String RequestBodyTest(@RequestBody JSONObject jsonObject) {
Integer id = jsonObject.getInteger("id");
String name = jsonObject.getString("name");
Integer age = jsonObject.getInteger("age");
return "---id:"+id+" ---name:"+name+" ---age:"+age;
}
Result:
Receiving into a String: here the body is first read into a String, then converted into a JSONObject for parsing.
@PostMapping("/getRequestBodyValue")
public String RequestBodyTest(@RequestBody String jsonStr) {
JSONObject jsonObject = JSON.parseObject(jsonStr); //convert jsonStr into a JSONObject, then read the values
Integer id = jsonObject.getInteger("id");
String name = jsonObject.getString("name");
Integer age = jsonObject.getInteger("age");
return "---id:"+id+" ---name:"+name+" ---age:"+age;
}
Result:
In addition, here is the non-json case, passing and receiving parameters via an entity class, again using the User entity class (User.java):
@GetMapping("/getValue")
public String entityValueTest( User user) {
return user.toString();
}
How to call it (parameter names must match the entity field names): you can pass all parameters, or only some of them.
@RequestHeader
This reads values placed in the request header, for example:
@ResponseBody
@RequestMapping("/getMyHeaderParam")
public String getOrderList(@RequestHeader String token,@RequestHeader String uuid) {
return "----token:"+token+"--- uuid:"+uuid;
}
HttpServletRequest
@GetMapping("/getHttpServletRequest")
public String httpServletRequestValueTest(HttpServletRequest request) {
String id = request.getParameter("id");
String name = request.getParameter("name");
String age = request.getParameter("age");
return "---id:"+id+" ---name:"+name+" ---age:"+age;
}
@GetMapping("/getHttpServletRequest")
public String httpServletRequestValueTest(HttpServletRequest request) {
Map<String, String[]> parameterMap = request.getParameterMap();
String[] ids = parameterMap.get("id");
String[] names = parameterMap.get("name");
String[] ages = parameterMap.get("age");
String id = ids[0];
String name =names[0];
String age =ages[0];
return "---id:"+id+" ---name:"+name+" ---age:"+age;
}
Result of calling either of the two variants above:
Reading the json body through HttpServletRequest (this is actually rather cumbersome and not recommended; the @RequestBody approach above is better):
@PostMapping("/getHttpServletRequest")
public String get(HttpServletRequest request) throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(request.getInputStream()));
String str = "";
String wholeStr = "";
//read the body content line by line
while((str = reader.readLine()) != null){
wholeStr += str;
}
//convert into a JSONObject
JSONObject jsonObject=JSONObject.parseObject(wholeStr);
Integer id = jsonObject.getInteger("id");
String name = jsonObject.getString("name");
Integer age = jsonObject.getInteger("age");
return "---id:"+id+" ---name:"+name+" ---age:"+age;
}
Result:
OK, that covers the various ways of receiving parameters.
---------------------------------
Regarding the question in the comments about whether @RequestBody can be used with GET requests: as mentioned in the article, it can, it is just not recommended. (The supposed differences between GET and POST are largely convention: the HTTP protocol itself never says a body may only be used with POST, nor does it impose length limits on GET or POST; those rules come from browsers and servers for various reasons. No need to go deeper here, just be aware of it.)
Example: the result shows the body is received correctly.
For an API debugging tool, see: 做一个合格的开发,从玩转Apipost开始_小目标青年的博客-CSDN博客_apipost压测
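The User entity class referenced in the @RequestBody and entity-binding examples above appears only as a screenshot in the original. A minimal sketch consistent with the fields used in those examples (id, name, age) follows; the use of Lombok @Data is an assumption, plain getters/setters work just as well.

import lombok.Data;

// Hypothetical reconstruction of the User entity used in the examples above.
@Data
public class User {
    private Integer id;
    private String name;
    private Integer age;
}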
Spring Boot Basics: JSON Conversion Framework, Global Exception Handling, and Connecting to a Database with JPA
1. The Spring Boot JSON conversion framework
The json framework I am personally used to is fastjson, so Spring Boot's default json support feels unfamiliar, and the natural question is: can I use fastjson for json parsing instead?
<dependencies>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.15</version>
</dependency>
</dependencies>
One important note: according to the official docs, since 1.2.10 there are two converters that support HttpMessageConverter: FastJsonHttpMessageConverter for Spring versions below 4.2, and FastJsonHttpMessageConverter4 for 4.2 and above (the exact differences have not been examined here). In other words, lower fastjson versions are not supported, so the minimum requirement here is 1.2.10+.
There are two ways to configure fastjson support:
Method 1: (1) have the startup class extend WebMvcConfigurerAdapter; (2) override configureMessageConverters.
Method 2: (1) register an HttpMessageConverters bean in the App.java startup class.
The code (App.java) is as follows:
import java.util.List;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.alibaba.fastjson.support.spring.FastJsonHttpMessageConverter;
//to integrate another json framework, extend WebMvcConfigurerAdapter and override configureMessageConverters
@SpringBootApplication
public class App extends WebMvcConfigurerAdapter {
// Method 1: override configureMessageConverters and register the FastJson converter
@Override
public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
FastJsonHttpMessageConverter converter = new FastJsonHttpMessageConverter();
converter.setFeatures(SerializerFeature.PrettyFormat);
converters.add(converter);
super.configureMessageConverters(converters);
}
// Method 2: register an HttpMessageConverters bean
/*
* @Bean public HttpMessageConverters faMessageConverters(){
* return new HttpMessageConverters(new FastJsonHttpMessageConverter()); }
*/
public static void main(String[] args) {
SpringApplication.run(App.class, args);
}
}
2. Global exception handling in Spring Boot
Exceptions in a project are usually handled in one place, so how do we do that? Create a class GlobalDefaultExceptionHandler and annotate it with @ControllerAdvice. @ControllerAdvice applies the methods annotated with @ExceptionHandler, @InitBinder or @ModelAttribute inside the class to all @RequestMapping methods. Very simple; in practice it is mostly useful with @ExceptionHandler, the other two are rarely needed. Annotate the method with @ExceptionHandler(value = Exception.class). The code is as follows:
package com.hpit.base.exception;
import javax.servlet.http.HttpServletRequest;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
@ControllerAdvice
public class GlobalDefaultExceptionHandler {
@ExceptionHandler(value = Exception.class)
public void defaultErrorHandler(HttpServletRequest req, Exception e) {
//print the exception info:
e.printStackTrace();
System.out.println("GlobalDefaultExceptionHandler.defaultErrorHandler()");
/*
* To return json or String data:
* add the @ResponseBody annotation to the method
* and simply return the value.
*/
/*
* To return a view:
* define and return a ModelAndView,
* and create the view file (e.g. error.html, error.ftl, error.jsp);
*
*/
}
}
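For the json-returning variant described in the comments above (add @ResponseBody and return a value), a minimal sketch might look like the following; the class name and the returned Map structure are illustrative, not from the original.

import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.ResponseBody;

import javax.servlet.http.HttpServletRequest;
import java.util.HashMap;
import java.util.Map;

// Sketch: a global handler that returns json instead of a view.
@ControllerAdvice
public class GlobalJsonExceptionHandler {

    @ResponseBody
    @ExceptionHandler(value = Exception.class)
    public Map<String, Object> jsonErrorHandler(HttpServletRequest req, Exception e) {
        Map<String, Object> body = new HashMap<>();
        body.put("url", req.getRequestURL().toString());
        body.put("message", e.getMessage());
        return body;
    }
}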
Add a method to com.hpit.test.web.DemoController:
@RequestMapping("/zeroException")
public int zeroException() {
return 100/0;
}
Visit http://127.0.0.1:8080/zeroException. This method is guaranteed to throw an exception, and the globally captured exception information then appears in the console.
3. Connecting to a database with Spring Boot JPA
No platform escapes database access, so how do we connect to a database in Spring Boot? It is simple: configure it in application.properties (located under src/main/resources). For more on application.properties, look up the relevant documentation yourself; only the MySQL configuration is shown here.
The rough steps:
(1) add the datasource configuration to application.properties;
(2) add the mysql dependency to pom.xml;
(3) obtain a Connection from the DataSource to test.
src/main/resources/application.properties:
spring.datasource.url = jdbc:mysql://localhost:3306/test
spring.datasource.username = root
spring.datasource.password = root
spring.datasource.driverClassName = com.mysql.jdbc.Driver
spring.datasource.max-active=20
spring.datasource.max-idle=8
spring.datasource.min-idle=8
spring.datasource.initial-size=10
pom.xml configuration:
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
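Step (3) above, obtaining a Connection from the DataSource, is not shown in the original; a minimal sketch with an illustrative class name could look like this:

import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import javax.sql.DataSource;
import java.sql.Connection;

// Sketch: inject the auto-configured DataSource and open a Connection
// at startup to verify the datasource configuration.
@Component
public class DataSourceConnectionTest implements CommandLineRunner {

    private final DataSource dataSource;

    public DataSourceConnectionTest(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    @Override
    public void run(String... args) throws Exception {
        try (Connection connection = dataSource.getConnection()) {
            System.out.println("Got connection: " + connection.getMetaData().getURL());
        }
    }
}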
That completes the configuration, and we can test it in the project by creating a class Demo. After creating the entity classes we may still have to write the CREATE TABLE statements by hand, which is when we remember the advantages of Hibernate. So how do we use Hibernate's nice features in Spring Boot? Easy; for the details see the next article, on JPA and Hibernate.
Author: zhulin1028. All rights reserved. Contact the author for authorization before commercial reproduction; for non-commercial reproduction please credit the source.
First Impressions of Using an ECS Cloud Server
I am a third-year student majoring in cyberspace security. When reproducing vulnerabilities I need a public IP and a server to set up reproduction environments. On the Alibaba Cloud website I saw that the "Feitian Acceleration Plan - university students practicing at home" program provides free ECS servers for university students, so I figured I could claim a free trial for a while to get familiar with setting up website environments, and I applied for the free ECS quota under the program. The experience was fairly straightforward: I connected over SSH with a shell tool, so I did not have to keep working in the web shell window, which makes later server management and operation much more convenient. I then set up Java and Python runtime environments on the server, stood up an RMI service, and tried to reproduce a fastjson vulnerability. There was a small hiccup along the way: in Alibaba Cloud's security groups many ports are blocked by default and have to be opened explicitly. Since my network and security group configuration was the default, the port I was listening on never received a callback, which made me think my reproduction had failed. With a classmate's help I realized the listening port simply was not open; after opening the required ports in the network and security group settings, the callback came through, I obtained a shell, and the reproduction succeeded. This period of using the ECS cloud server improved my grasp of server configuration and Linux commands, and I now have a reasonable understanding of servers. I hope to keep using one: keeping my own backends and data on a server will make it much easier to build small websites, personal blogs, forum communities, knowledge-management tools, and personal learning environments. Many thanks to Alibaba Cloud's "Feitian Acceleration Plan 2.0" for giving university students a free environment that simulates a real server and makes our learning much more convenient; I will continue to support and follow Alibaba Cloud's server products.