The logger adapter prints the changed data as expected, but the data cannot be synced to ES.
java.lang.RuntimeException: NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{pPHaJwc_Rpaz6XSEviY_WA}{192.168.56.117}{192.168.56.117:9300}]]
	at com.alibaba.otter.canal.client.adapter.es.core.service.ESSyncService.sync(ESSyncService.java:116) ~[na:na]
	at com.alibaba.otter.canal.client.adapter.es.core.service.ESSyncService.sync(ESSyncService.java:64) ~[na:na]
	at com.alibaba.otter.canal.client.adapter.es.core.ESAdapter.sync(ESAdapter.java:115) ~[na:na]
	at com.alibaba.otter.canal.client.adapter.es.core.ESAdapter.sync(ESAdapter.java:94) ~[na:na]
	at com.alibaba.otter.canal.adapter.launcher.loader.AdapterProcessor.batchSync(AdapterProcessor.java:139) ~[client-adapter.launcher-1.1.5.jar:na]
	at com.alibaba.otter.canal.adapter.launcher.loader.AdapterProcessor.lambda$null$1(AdapterProcessor.java:97) ~[client-adapter.launcher-1.1.5.jar:na]
	at java.util.concurrent.CopyOnWriteArrayList.forEach(CopyOnWriteArrayList.java:891) ~[na:1.8.0_251]
	at com.alibaba.otter.canal.adapter.launcher.loader.AdapterProcessor.lambda$null$2(AdapterProcessor.java:94) ~[client-adapter.launcher-1.1.5.jar:na]
	at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[na:1.8.0_251]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[na:1.8.0_251]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[na:1.8.0_251]
	at java.lang.Thread.run(Thread.java:748) ~[na:1.8.0_251]
Caused by: org.elasticsearch.client.transport.NoNodeAvailableException: None of the configured nodes are available: [{#transport#-1}{pPHaJwc_Rpaz6XSEviY_WA}{192.168.56.117}{192.168.56.117:9300}]
	at org.elasticsearch.client.transport.TransportClientNodesService.ensureNodesAreAvailable(TransportClientNodesService.java:352) ~[na:na]
	at org.elasticsearch.client.transport.TransportClientNodesService.execute(TransportClientNodesService.java:248) ~[na:na]
	at org.elasticsearch.client.transport.TransportProxyClient.execute(TransportProxyClient.java:57) ~[na:na]
	at org.elasticsearch.client.transport.TransportClient.doExecute(TransportClient.java:394) ~[na:na]
	at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:392) ~[na:na]
	at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:381) ~[na:na]
	at org.elasticsearch.client.support.AbstractClient$ClusterAdmin.execute(AbstractClient.java:675) ~[na:na]
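For reference, the failing call in the trace goes through Elasticsearch's TransportClient on port 9300. A minimal standalone check along the same lines (an illustrative sketch, not the adapter's own code; the class name TransportCheck is made up) throws the same NoNodeAvailableException when the node at 192.168.56.117:9300 cannot be reached or when cluster.name does not match:

import java.net.InetAddress;

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class TransportCheck {
    public static void main(String[] args) throws Exception {
        // cluster.name must match the target cluster; a mismatch also ends
        // in NoNodeAvailableException because the client drops the node
        Settings settings = Settings.builder()
                .put("cluster.name", "elasticsearch")
                .build();
        try (TransportClient client = new PreBuiltTransportClient(settings)
                .addTransportAddress(new TransportAddress(
                        InetAddress.getByName("192.168.56.117"), 9300))) {
            // same ClusterAdmin path as in the stack trace; throws
            // NoNodeAvailableException if the node is not reachable on 9300
            ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
            System.out.println(health.getStatus());
        }
    }
}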
application.yml
server:
  port: 8081
spring:
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8
    default-property-inclusion: non_null
canal.conf:
  mode: tcp # tcp kafka rocketMQ rabbitMQ
  flatMessage: true
  zookeeperHosts:
  syncBatchSize: 1000
  retries: 0
  timeout:
  accessKey:
  secretKey:
  consumerProperties:
    # canal tcp consumer
    canal.tcp.server.host: 192.168.56.117:11111
    #canal.tcp.zookeeper.hosts:
    canal.tcp.batch.size: 500
    #canal.tcp.username:
    #canal.tcp.password:
    # kafka consumer
    kafka.bootstrap.servers: 127.0.0.1:9092
    kafka.enable.auto.commit: false
    kafka.auto.commit.interval.ms: 1000
    kafka.auto.offset.reset: latest
    kafka.request.timeout.ms: 40000
    kafka.session.timeout.ms: 30000
    kafka.isolation.level: read_committed
    kafka.max.poll.records: 1000
    # rocketMQ consumer
    rocketmq.namespace:
    rocketmq.namesrv.addr: 127.0.0.1:9876
    rocketmq.batch.size: 1000
    rocketmq.enable.message.trace: false
    rocketmq.customized.trace.topic:
    rocketmq.access.channel:
    rocketmq.subscribe.filter:
    # rabbitMQ consumer
    rabbitmq.host:
    rabbitmq.virtual.host:
    rabbitmq.username:
    rabbitmq.password:
    rabbitmq.resource.ownerId:
  srcDataSources:
    defaultDS:
      url: jdbc:mysql://127.0.0.1:3306/cloud_wall?useUnicode=true
      username: root
      password: a9530.A.
  canalAdapters:
  - instance: example
    groups:
    - groupId: g1
      outerAdapters:
      - name: logger
      - name: es7
        key: exampleKey
        hosts: 127.0.0.1:9300 # ES cluster addresses, comma separated
        properties:
          mode: rest # transport or rest mode can be specified
          # security.auth: test:123456 # only used for rest mode
          cluster.name: elasticsearch # es cluster name
es7.yml
dataSourceKey: defaultDS
outerAdapterKey: exampleKey
destination: example
groupId: g1
esMapping:
  _index: ana
  _id: _id
  upsert: true
  sql: "SELECT a.id as _id, a.user_id, u.user_nick_name, a.ana_type_id, t.ana_type_name, a.ana_title, a.ana_content, a.comment_num, a.prize_num, a.create_date, a.update_date FROM ana a LEFT JOIN ana_type t ON a.ana_type_id = t.id LEFT JOIN user u ON a.user_id = u.id"
  etlCondition: "where a.create_date>={'0'}"
  commitBatch: 3000
Original question from GitHub user caipeishen
Please confirm that your Elasticsearch node has been started and is available. If the Elasticsearch node is running, check your network and firewall settings to make sure the Canal adapter can reach the Elasticsearch node. If the Elasticsearch node is not running, start it and verify that it started successfully. Finally, check that the Canal adapter configuration file has the correct Elasticsearch node address and port.
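One thing worth double-checking in the posted application.yml: it sets mode: rest but lists hosts: 127.0.0.1:9300, while the stack trace shows a transport client trying 192.168.56.117:9300. Rest mode normally targets the Elasticsearch HTTP port (9200 by default) rather than the transport port 9300, and the hosts entry should point at the address the ES node actually listens on. A minimal connectivity check with the Elasticsearch low-level REST client (an illustrative sketch; the class name RestCheck is made up and 192.168.56.117 is taken from the error message) could look like:

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RestCheck {
    public static void main(String[] args) throws Exception {
        // 192.168.56.117 is the node shown in the error message; replace it
        // with the address your Elasticsearch node actually listens on
        try (RestClient client = RestClient.builder(
                new HttpHost("192.168.56.117", 9200, "http")).build()) {
            // a simple cluster-health call confirms the HTTP port is reachable
            Response response = client.performRequest(new Request("GET", "/_cluster/health"));
            System.out.println(response.getStatusLine());
        }
    }
}

If this request succeeds from the machine running the adapter, the remaining suspects are the hosts, mode and cluster.name values in application.yml.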