############################# Server Basics #############################
# 唯一的 kafka 服务器 id,每个 broker 都不一样
broker.id=0
############################# Socket Server Settings #############################
# 允许删掉主题
delete.topic.enable=true
# 监听ip和端口,listeners是broker进程实际绑定并监听的地址(对外公布给客户端的地址见下方advertised.listeners)
listeners=PLAINTEXT://192.168.229.147:9092
# 节点的主机名会通知给生产者和消费者
advertised.listeners=PLAINTEXT://192.168.229.147:9092
# 接受网络请求的线程数
num.network.threads=3
# 进行磁盘IO的线程数
num.io.threads=8
# 套接字服务器使用的发送缓冲区大小
socket.send.buffer.bytes=102400
# 套接字服务器使用的接收缓冲区大小
socket.receive.buffer.bytes=102400
# 单个请求最大能接收的数据量
socket.request.max.bytes=104857600
############################# Log Basics #############################
# 用来存储日志文件夹
log.dirs=/tmp/kafka-logs
# 每个主题的日志分区的默认数量。更多的分区允许更大的并行操作,但是它会导致节点产生更多的文件。
# 注意:这里配置的是分区数而非副本数;副本数由下方的 replication.factor 相关配置控制,生产环境建议设置成3,免得单点故障
num.partitions=1
# 每个数据目录中的线程数,用于在启动时日志恢复,并在关闭时刷新。
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# 强制刷新数据到磁盘之前要接受的消息数
#log.flush.interval.messages=10000
# 在强制刷新之前,消息可以在日志中占用的最长时间
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# 日志文件最大字节,超过后重新创建新日志文件
log.segment.bytes=1073741824
# 检查日志段以查看是否可以根据保留策略删除日志段的时间间隔
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0