注意:mysql、redis等连接密码需修改为相应值
Seata-Server
环境
- 版本:1.4.2
- OS: CentOS Linux release 7.5.1804 (Core)
- ip:192.168.1.78
Registry.conf
registry {
  # Registry type. Supported: file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "nacos"

  nacos {
    # Name under which seata-server registers itself in Nacos
    application = "seata-server"
    serverAddr = "192.168.1.78:8848"
    group = "SEATA_GROUP"
    namespace = ""
    cluster = "default"
    username = ""
    password = ""
  }
}
config {
  # Config-center type. Supported: file, nacos, apollo, zk, consul, etcd3
  type = "nacos"

  nacos {
    serverAddr = "192.168.1.78:8848"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
    # Nacos dataId holding the externalized server configuration
    dataId = "seataServer.properties"
  }
}
seataServer.properties
# ---- Netty transport between TC (server) and TM/RM (clients) ----
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=true
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
# ---- Transaction-group mapping: clients using tx-service-group
#      "my_test_tx_group" are routed to TC cluster "default" ----
service.vgroupMapping.my_test_tx_group=default
#service.my_test_tx_group.grouplist=192.168.1.78:8091
# Address list of the "default" TC cluster (host:port of seata-server)
service.default.grouplist=192.168.1.78:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
# ---- Resource-manager (RM) client settings ----
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.rm.sagaJsonParser=fastjson
client.rm.tccActionInterceptorOrder=-2147482648
# ---- Transaction-manager (TM) client settings ----
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
client.tm.interceptorOrder=-2147482648
# ---- Session/lock persistence: db mode is active, so the store.file.*
#      keys below are inert defaults ----
store.mode=db
# NOTE(review): store.lock.mode / store.session.mode look like Seata 1.5+
# properties while this doc targets 1.4.2 — confirm 1.4.2 honors them.
store.lock.mode=file
#store.session.mode=file
store.session.mode=db
store.publicKey=
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
# ---- MySQL backing store (change user/password to real values) ----
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://192.168.1.235:3306/seata?useUnicode=true&rewriteBatchedStatements=true
store.db.user=root
store.db.password=123456
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.distributedLockTable=distributed_lock
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
# ---- Redis store settings (unused while store.mode=db) ----
store.redis.mode=single
store.redis.single.host=192.168.1.78
store.redis.single.port=6379
store.redis.sentinel.masterName=
store.redis.sentinel.sentinelHosts=
store.redis.maxConn=10
store.redis.minConn=1
store.redis.maxTotal=100
store.redis.database=0
store.redis.password=123456
store.redis.queryLimit=100
# ---- TC recovery / retry timers (milliseconds) ----
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
server.distributedLockExpireTime=10000
# ---- AT-mode undo log handling ----
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k
# ---- Misc: logging, wire serialization, metrics, TCC fence ----
log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
tcc.fence.logTableName=tcc_fence_log
tcc.fence.cleanPeriod=1h
seata-server.sh
没有修改过,1.4.2发布版本是什么样就是什么样
启动命令
sh seata-server.sh -m db -h 192.168.1.78 -p 8091
Seata-Client
SeataClient是直接集成到我们的业务微服务中,SeataClient的原理是代理我们jdbc数据源,在应用程序和数据库之间加了一层,通过添加的这一层来做事务管理。
下面虚拟了两个业务微服务,一个叫seata-user-service,一个叫seata-car-service。
关于SeataServer和SeataClient的版本对应问题,当前暂未找到相关官方的说明文档
环境
- SpringBoot:2.5.3
- SpringCloud:2020.0.3
- SpringCloudAlibaba:2021.1
pom
网上有一些文章说引入pom依赖之后,需要手动配置相关容器类,咱们使用的是高版本,引入 spring-cloud-starter-alibaba-seata 后,SpringBoot能够为我们完成所有的自动配置,这一点需要注意,手动配置和自动配置不可同时存在,这点官网上面有说明。
~~~xml
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-dependencies</artifactId>
    <version>${spring-cloud.version}</version>
    <type>pom</type>
    <scope>import</scope>
</dependency>
<!-- SpringCloud Alibaba 微服务 -->
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-alibaba-dependencies</artifactId>
    <version>${spring-cloud-alibaba.version}</version>
    <type>pom</type>
    <scope>import</scope>
</dependency>
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
</dependency>
## seata-user-service
### application.yml
重点关注两个地方
* seata.registry:seataClient通过这个配置来找到SeataServer
* spring.cloud.alibaba.seata.tx-service-group:通过这个值来确定事务分组,当前配置的值my_test_tx_group,对应seataServer.properties中的service.vgroupMapping.my_test_tx_group=default
事务分组是为了逻辑上做高可用而设计的一个东西,从某种意义上来说本身就是一个非必须的东西,关于事务分组具体如何使用后续再整理文档说明
server:
  port: 9302

spring:
  application:
    name: seata-user-service
  cloud:
    nacos:
      discovery:
        # Nacos service-registry address
        server-addr: 192.168.1.78:8848
    alibaba:
      seata:
        # Must match a service.vgroupMapping.<group> key in seataServer.properties
        tx-service-group: my_test_tx_group

logging:
  level:
    io.seata: debug

seata:
  registry:
    # How the Seata client locates the Seata server
    type: nacos
    nacos:
      # Fix: the Nacos port :8848 was missing here (car-service and
      # registry.conf both use 192.168.1.78:8848)
      server-addr: 192.168.1.78:8848
      group: SEATA_GROUP
      cluster: default
## seata-car-service
### application.yml
server:
  port: 9301

spring:
  application:
    name: seata-car-service
  cloud:
    nacos:
      discovery:
        # Nacos service-registry address
        server-addr: 192.168.1.78:8848
    alibaba:
      seata:
        # Transaction group; resolved via service.vgroupMapping.* on the server
        tx-service-group: my_test_tx_group

logging:
  level:
    io.seata: debug

seata:
  registry:
    # How the Seata client locates the Seata server
    type: nacos
    nacos:
      server-addr: 192.168.1.78:8848
      group: SEATA_GROUP
      cluster: default
~~~