
Canal Admin + Server: subscribing to MySQL data and sending it to Kafka fails

Canal Admin + Server is set up to subscribe to MySQL data and send it to Kafka. Canal reports no errors, but no data ever shows up in Kafka; the MySQL replica itself is working normally.

Canal version: v1.1.5

Cluster status:

[screenshot: 提问112.png]

Cluster configuration (canal.properties):

#################################################
######### common argument #############
#################################################
# tcp bind ip
canal.ip =
# register ip to zookeeper
canal.register.ip =
canal.port = 11111
canal.metrics.pull.port = 11112
# canal instance user/passwd
canal.user = canal
canal.passwd = E3619321C1A937C46A0D8BD1DAC39F93B27D4458

# canal admin config
#canal.admin.manager = 127.0.0.1:8089
canal.admin.port = 11110
canal.admin.user = admin
canal.admin.passwd = 4ACFE3202A5FF5CF467898FC58AAB1D615029441
# admin auto register
#canal.admin.register.auto = true
#canal.admin.register.cluster =
#canal.admin.register.name =

canal.zkServers = 10.69.8.12:2181,10.69.8.13:2181,10.69.8.14:2181
# flush data to zk
canal.zookeeper.flush.period = 1000
canal.withoutNetty = false
# tcp, kafka, rocketMQ, rabbitMQ
canal.serverMode = tcp
# flush meta cursor/parse position to file
canal.file.data.dir = ${canal.conf.dir}
canal.file.flush.period = 1000
## memory store RingBuffer size, should be Math.pow(2,n)
canal.instance.memory.buffer.size = 16384
## memory store RingBuffer used memory unit size, default 1kb
canal.instance.memory.buffer.memunit = 1024
## memory store gets mode used MEMSIZE or ITEMSIZE
canal.instance.memory.batch.mode = MEMSIZE
canal.instance.memory.rawEntry = true

## detecting config
canal.instance.detecting.enable = false
#canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now()
canal.instance.detecting.sql = select 1
canal.instance.detecting.interval.time = 3
canal.instance.detecting.retry.threshold = 3
canal.instance.detecting.heartbeatHaEnable = false

# support maximum transaction size, more than the size of the transaction will be cut into multiple transactions delivery
canal.instance.transaction.size = 1024
# mysql fallback connected to new master should fallback times
canal.instance.fallbackIntervalInSeconds = 60

# network config
canal.instance.network.receiveBufferSize = 16384
canal.instance.network.sendBufferSize = 16384
canal.instance.network.soTimeout = 30

# binlog filter config
canal.instance.filter.druid.ddl = true
canal.instance.filter.query.dcl = false
canal.instance.filter.query.dml = false
canal.instance.filter.query.ddl = false
canal.instance.filter.table.error = false
canal.instance.filter.rows = false
canal.instance.filter.transaction.entry = false
canal.instance.filter.dml.insert = false
canal.instance.filter.dml.update = false
canal.instance.filter.dml.delete = false

# binlog format/image check
canal.instance.binlog.format = ROW,STATEMENT,MIXED
canal.instance.binlog.image = FULL,MINIMAL,NOBLOB

# binlog ddl isolation
canal.instance.get.ddl.isolation = false

# parallel parser config
canal.instance.parser.parallel = true
## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
#canal.instance.parser.parallelThreadSize = 16
## disruptor ringbuffer size, must be power of 2
canal.instance.parser.parallelBufferSize = 256

# table meta tsdb info
canal.instance.tsdb.enable = true
canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:}
canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
canal.instance.tsdb.dbUsername = canal
canal.instance.tsdb.dbPassword = canal
# dump snapshot interval, default 24 hour
canal.instance.tsdb.snapshot.interval = 24
# purge snapshot expire, default 360 hour(15 days)
canal.instance.tsdb.snapshot.expire = 360

#################################################
######### destinations #############
#################################################
canal.destinations =
# conf root dir
canal.conf.dir = ../conf
# auto scan instance dir add/remove and start/stop instance
canal.auto.scan = true
canal.auto.scan.interval = 5
# set this value to 'true' means that when binlog pos not found, skip to latest.
# WARN: pls keep 'false' in production env, or if you know what you want.
canal.auto.reset.latest.pos.mode = false

canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml
#canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml

canal.instance.global.mode = manager
canal.instance.global.lazy = false
canal.instance.global.manager.address = ${canal.admin.manager}
#canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
canal.instance.global.spring.xml = classpath:spring/file-instance.xml
#canal.instance.global.spring.xml = classpath:spring/default-instance.xml

##################################################
######### MQ Properties #############
##################################################
canal.mq.flatMessage = false
canal.mq.canalBatchSize = 50
canal.mq.canalGetTimeout = 100
# Set this value to "cloud", if you want open message trace feature in aliyun.
canal.mq.accessChannel = local

canal.mq.database.hash = true
canal.mq.send.thread.size = 30
canal.mq.build.thread.size = 8

##################################################
######### Kafka #############
##################################################
kafka.bootstrap.servers = 10.69.8.12:9092,10.69.8.13:9092,10.69.8.14:9092
kafka.acks = all
kafka.compression.type = none
kafka.batch.size = 16384
kafka.linger.ms = 1
kafka.max.request.size = 1048576
kafka.buffer.memory = 33554432
kafka.max.in.flight.requests.per.connection = 1
kafka.retries = 0

kafka.kerberos.enable = false
kafka.kerberos.krb5.file = "../conf/kerberos/krb5.conf"
kafka.kerberos.jaas.file = "../conf/kerberos/jaas.conf"

Instance configuration (instance.properties):

#################################################
## mysql serverId, v1.0.26+ will autoGen
canal.instance.mysql.slaveId=0

# enable gtid use true/false
canal.instance.gtidon=false

# position info
canal.instance.master.address=10.69.8.12:3309
canal.instance.master.journal.name=
canal.instance.master.position=
canal.instance.master.timestamp=
canal.instance.master.gtid=

# rds oss binlog
canal.instance.rds.accesskey=
canal.instance.rds.secretkey=
canal.instance.rds.instanceId=

# table meta tsdb info
canal.instance.tsdb.enable=true
#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
#canal.instance.tsdb.dbUsername=canal
#canal.instance.tsdb.dbPassword=canal

#canal.instance.standby.address =
#canal.instance.standby.journal.name =
#canal.instance.standby.position =
#canal.instance.standby.timestamp =
#canal.instance.standby.gtid=

# username/password
canal.instance.dbUsername=root
canal.instance.dbPassword=xjVRZi40GMt6TNFO
canal.instance.connectionCharset = UTF-8
# enable druid Decrypt database password
canal.instance.enableDruid=false
#canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==

# table regex
canal.instance.filter.regex=chirp.dynamic_tb,chirp.post_tb
# table black regex
canal.instance.filter.black.regex=
# table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch
# table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch

# mq config
canal.mq.topic=chirp_default

# dynamic topic route by schema or table regex
canal.mq.dynamicTopic=chirp_dynamic:chirp.dynamic_tb,chirp_post:chirp.post_tb
canal.mq.partitionsNum=10
canal.mq.partitionHash=.*\\..*:id
canal.mq.dynamicTopicPartitionNum=chirp.dynamic_tb:10,chirp.post_tb:10
#################################################

Original question from GitHub user jaceding

Posted by 山海行 on 2023-04-27 16:56:16
1 answer
  • Your problem is that you never set the server mode to Kafka: canal.serverMode = kafka.

    The filter condition also looks wrong; check the hint in the official docs for the expected DBname.tableName format. A sketch of both fixes follows below.
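
    A minimal sketch of the two changes, assuming the chirp schema and table names from the question. The escaped dots in the regex are my assumption following the canal QuickStart examples (the filter value is treated as a Perl regular expression); the answer itself does not spell out the escaping:

    # canal.properties: deliver parsed binlog to Kafka instead of serving TCP clients
    canal.serverMode = kafka

    # instance.properties: comma-separated DBname.tableName entries, regex dots escaped (assumed)
    canal.instance.filter.regex=chirp\\.dynamic_tb,chirp\\.post_tb

    After restarting the instance, rows from chirp.dynamic_tb and chirp.post_tb should land in the chirp_dynamic and chirp_post topics defined by canal.mq.dynamicTopic.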

    Original answer from GitHub user mrchicn

    2023-04-28 12:34:17