Flink standalone 环境下作业失败，MongoDB CDC 报错 ChangeStreamHistoryLost（error 286，恢复点已不在 oplog 中），完整日志如下:
2025-11-21 15:15:58
java.lang.RuntimeException: One or more fetchers have encountered exception
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcherManager.checkErrors(SplitFetcherManager.java:333)
at org.apache.flink.connector.base.source.reader.SourceReaderBase.getNextFetch(SourceReaderBase.java:228)
at org.apache.flink.connector.base.source.reader.SourceReaderBase.pollNext(SourceReaderBase.java:190)
at org.apache.flink.streaming.api.operators.SourceOperator.emitNext(SourceOperator.java:422)
at org.apache.flink.streaming.runtime.io.StreamTaskSourceInput.emitNext(StreamTaskSourceInput.java:68)
at org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processInput(StreamOneInputProcessor.java:65)
at org.apache.flink.streaming.runtime.tasks.StreamTask.processInput(StreamTask.java:579)
at org.apache.flink.streaming.runtime.tasks.mailbox.MailboxProcessor.runMailboxLoop(MailboxProcessor.java:231)
at org.apache.flink.streaming.runtime.tasks.StreamTask.runMailboxLoop(StreamTask.java:909)
at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:858)
at org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:958)
at org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:937)
at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:751)
at org.apache.flink.runtime.taskmanager.Task.run(Task.java:566)
at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.lang.RuntimeException: SplitFetcher thread 0 received unexpected exception while polling the records
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.runOnce(SplitFetcher.java:168)
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.run(SplitFetcher.java:117)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
... 1 more
Caused by: java.io.IOException: org.apache.flink.util.FlinkRuntimeException: Read split StreamSplit{splitId='stream-split', offset={resumeToken={"_data": "826915E4CE000000012B0229296E04"}, timestamp=7572209921907752961}, endOffset={resumeToken=null, timestamp=9223372034707292159}, isSuspended=false} error due to Command failed with error 286 (ChangeStreamHistoryLost): 'Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.' on server 10.9.3.206:27027. The full response is {"ok": 0.0, "errmsg": "Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.", "code": 286, "codeName": "ChangeStreamHistoryLost", "operationTime": {"$timestamp": {"t": 1763709336, "i": 5}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1763709344, "i": 2}}, "signature": {"hash": {"$binary": {"base64": "7cZCjtaknSHuyZBIDd/H7VTPEHo=", "subType": "00"}}, "keyId": 7533257841298113139}}, "errorLabels": ["NonResumableChangeStreamError"]}.
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.fetch(IncrementalSourceSplitReader.java:101)
at org.apache.flink.connector.base.source.reader.fetcher.FetchTask.run(FetchTask.java:58)
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.runOnce(SplitFetcher.java:165)
... 6 more
Caused by: org.apache.flink.util.FlinkRuntimeException: Read split StreamSplit{splitId='stream-split', offset={resumeToken={"_data": "826915E4CE000000012B0229296E04"}, timestamp=7572209921907752961}, endOffset={resumeToken=null, timestamp=9223372034707292159}, isSuspended=false} error due to Command failed with error 286 (ChangeStreamHistoryLost): 'Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.' on server 10.9.3.206:27027. The full response is {"ok": 0.0, "errmsg": "Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.", "code": 286, "codeName": "ChangeStreamHistoryLost", "operationTime": {"$timestamp": {"t": 1763709336, "i": 5}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1763709344, "i": 2}}, "signature": {"hash": {"$binary": {"base64": "7cZCjtaknSHuyZBIDd/H7VTPEHo=", "subType": "00"}}, "keyId": 7533257841298113139}}, "errorLabels": ["NonResumableChangeStreamError"]}.
at org.apache.flink.cdc.connectors.base.source.reader.external.IncrementalSourceStreamFetcher.checkReadException(IncrementalSourceStreamFetcher.java:137)
at org.apache.flink.cdc.connectors.base.source.reader.external.IncrementalSourceStreamFetcher.pollSplitRecords(IncrementalSourceStreamFetcher.java:115)
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.pollSplitRecords(IncrementalSourceSplitReader.java:192)
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.fetch(IncrementalSourceSplitReader.java:98)
... 8 more
Caused by: com.mongodb.MongoQueryException: Command failed with error 286 (ChangeStreamHistoryLost): 'Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.' on server 10.9.3.206:27027. The full response is {"ok": 0.0, "errmsg": "Error on remote shard mongodb-node03:27127 :: caused by :: Resume of change stream was not possible, as the resume point may no longer be in the oplog.", "code": 286, "codeName": "ChangeStreamHistoryLost", "operationTime": {"$timestamp": {"t": 1763709336, "i": 5}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1763709344, "i": 2}}, "signature": {"hash": {"$binary": {"base64": "7cZCjtaknSHuyZBIDd/H7VTPEHo=", "subType": "00"}}, "keyId": 7533257841298113139}}, "errorLabels": ["NonResumableChangeStreamError"]}
at com.mongodb.internal.operation.QueryHelper.translateCommandException(QueryHelper.java:29)
at com.mongodb.internal.operation.QueryBatchCursor.lambda$getMore$1(QueryBatchCursor.java:292)
at com.mongodb.internal.operation.QueryBatchCursor$ResourceManager.executeWithConnection(QueryBatchCursor.java:514)
at com.mongodb.internal.operation.QueryBatchCursor.getMore(QueryBatchCursor.java:282)
at com.mongodb.internal.operation.QueryBatchCursor.tryHasNext(QueryBatchCursor.java:235)
at com.mongodb.internal.operation.QueryBatchCursor.lambda$tryNext$0(QueryBatchCursor.java:218)
at com.mongodb.internal.operation.QueryBatchCursor$ResourceManager.execute(QueryBatchCursor.java:407)
at com.mongodb.internal.operation.QueryBatchCursor.tryNext(QueryBatchCursor.java:217)
at com.mongodb.internal.operation.ChangeStreamBatchCursor.lambda$tryNext$4(ChangeStreamBatchCursor.java:97)
at com.mongodb.internal.operation.ChangeStreamBatchCursor.resumeableOperation(ChangeStreamBatchCursor.java:189)
at com.mongodb.internal.operation.ChangeStreamBatchCursor.tryNext(ChangeStreamBatchCursor.java:95)
at com.mongodb.client.internal.MongoChangeStreamCursorImpl.tryNext(MongoChangeStreamCursorImpl.java:90)
at org.apache.flink.cdc.connectors.mongodb.source.reader.fetch.MongoDBStreamFetchTask.execute(MongoDBStreamFetchTask.java:124)
at org.apache.flink.cdc.connectors.base.source.reader.external.IncrementalSourceStreamFetcher.lambda$submitTask$0(IncrementalSourceStreamFetcher.java:89)
... 5 more
版权声明:本文内容由阿里云实名注册用户自发贡献,版权归原作者所有,阿里云开发者社区不拥有其著作权,亦不承担相应法律责任。具体规则请查看《阿里云开发者社区用户服务协议》和《阿里云开发者社区知识产权保护指引》。如果您发现本社区中有涉嫌抄袭的内容,填写侵权投诉表单进行举报,一经查实,本社区将立刻删除涉嫌侵权内容。
实时计算Flink版是阿里云提供的全托管Serverless Flink云服务,基于 Apache Flink 构建的企业级、高性能实时大数据处理系统。提供全托管版 Flink 集群和引擎,提高作业开发运维效率。