d. KafkaWordCountBolt
package com.shaonaiyi.kafka;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

import java.util.HashMap;
import java.util.Map;

/**
 * @author: shaonaiyi
 * @createTime: 2019/07/14 13:42
 * @description: word-count bolt
 */
public class KafkaWordCountBolt extends BaseRichBolt {
    private OutputCollector collector;
    private HashMap<String, Long> counts = null; // running count for each word

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.collector = outputCollector;
        this.counts = new HashMap<String, Long>();
    }

    @Override
    public void execute(Tuple tuple) {
        // receives, in real time, the tuple stream emitted by SplitSentenceBolt
        String word = tuple.getStringByField("word"); // fetch the word from the tuple by field name
        // accumulate the total number of times each word has appeared
        Long count = counts.getOrDefault(word, 0L);
        count++;
        this.counts.put(word, count);
        // emit the word and its current count as the tuple values for the next bolt
        this.collector.emit(new Values(word, count.toString()));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        // declare two output field names because each emitted tuple carries two values
        outputFieldsDeclarer.declare(new Fields("key", "message"));
    }
}
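Note the coupling here: the bolt declares its output fields as "key" and "message" because the KafkaBolt configured in the next listing uses FieldNameBasedTupleToKafkaMapper, which by default looks up exactly those field names, and the count is stringified because the Kafka producer is configured with StringSerializer. If you would rather keep descriptive field names such as "word"/"count", the mapper also accepts explicit field names; a minimal sketch (verify the two-argument constructor against your Storm version):

// Sketch: map the bolt's "word" field to the Kafka record key and
// its "count" field to the record value, instead of renaming fields.
FieldNameBasedTupleToKafkaMapper mapper =
        new FieldNameBasedTupleToKafkaMapper("word", "count");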
e. WordCountKafkaTopology
package com.shaonaiyi.kafka;

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

import java.util.Arrays;
import java.util.Properties;

/**
 * @author: shaonaiyi
 * @createTime: 2019/07/15 22:54
 * @description: WordCountTopology for Kafka
 */
public class WordCountKafkaTopology {

    private static final String SENTENCE_SPOUT_ID = "sentence-spout";
    private static final String SPLIT_BOLT_ID = "split-bolt";
    private static final String COUNT_BOLT_ID = "count-bolt";
    private static final String KAFKA_BOLT_ID = "kafka-bolt";
    private static final String TOPOLOGY_NAME = "word-count-topology";

    public static void main(String[] args) throws InvalidTopologyException, AuthorizationException, AlreadyAliveException {
        int workers = Integer.parseInt(args[0]);

        // consume data from Kafka (KafkaSpoutBuilder and KafkaSplitSentenceBolt
        // come from the earlier steps of this tutorial)
        KafkaSpout kafkaSpout = new KafkaSpoutBuilder()
                .brokers(Arrays.asList("master:9092"))
                .topic("word-count-input")
                .build();
        KafkaSplitSentenceBolt splitSentenceBolt = new KafkaSplitSentenceBolt();
        KafkaWordCountBolt wordCountBolt = new KafkaWordCountBolt();

        Properties props = new Properties();
        props.put("bootstrap.servers", "master:9092");
        // "acks" determines when a produce request is considered complete, i.e. how many
        // brokers must have committed the data to their logs and confirmed this to the leader.
        // Typical values:
        //   0: the producer never waits for an acknowledgement from the broker
        //      (lowest latency, but highest risk: data is lost if the server dies).
        //   1: the producer waits until the leader replica has acknowledged the write
        //      (low latency while still confirming that the server received the data).
        //  -1: the producer waits until all in-sync replicas have acknowledged the write.
        props.put("acks", "1");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaBolt kafkaBolt = new KafkaBolt()
                .withProducerProperties(props)
                .withTopicSelector(new DefaultTopicSelector("word-count-output"))
                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper());

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(SENTENCE_SPOUT_ID, kafkaSpout);
        builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
        builder.setBolt(COUNT_BOLT_ID, wordCountBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
        builder.setBolt(KAFKA_BOLT_ID, kafkaBolt).shuffleGrouping(COUNT_BOLT_ID);

        // 3. submit the topology
        Config config = new Config(); // runtime configuration that applies globally to every component of the topology
        config.setNumWorkers(workers);
        StormSubmitter.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
    }
}
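Before submitting to the cluster, it can help to smoke-test the wiring in-process. A minimal sketch, assuming the same builder as above and a reachable Kafka broker at master:9092 (org.apache.storm.LocalCluster ships with storm-core, so storm-core must not be scoped as provided for a local run, and the enclosing method must declare throws InterruptedException for Thread.sleep):

// Local smoke test (sketch): run the topology in-process for one minute, then stop.
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(TOPOLOGY_NAME, new Config(), builder.createTopology());
Thread.sleep(60 * 1000);               // let the topology consume some messages
cluster.killTopology(TOPOLOGY_NAME);
cluster.shutdown();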
0x03 Verify the Results
1. Package the Storm Code
a. Package
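The packaging command is not listed in the original; assuming a standard Maven project with the assembly plugin configured, which the jar-with-dependencies name below suggests, it would be:
mvn clean package -DskipTests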
b. Upload to the cluster
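Likewise an assumption (the hadoop-sny user and target directory are taken from the submit command in step 3 below):
scp target/stormlearning-1.0-SNAPSHOT-jar-with-dependencies.jar hadoop-sny@master:/home/hadoop-sny/jar/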
2. Run ZooKeeper and Storm
This step follows the same procedure as the tutorial 实时流处理框架之Storm的安装与部署 (Installing and Deploying the Storm Real-Time Stream Processing Framework) => section 0x03 启动并校验Storm (Start and Verify Storm), namely:
a. Start the three ZooKeeper instances on the cluster (check that the processes exist; if Kafka has already been started, you should also see Kafka processes)
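For example (treat the zkServer.sh path as a placeholder for your own ZooKeeper install):
$ZOOKEEPER_HOME/bin/zkServer.sh start
jps    # each ZooKeeper node should list QuorumPeerMain, plus Kafka if Kafka is running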
b. Start Storm
Start Nimbus and the Web UI on master:
cd ~/bigdata/apache-storm-1.2.2
nohup bin/storm nimbus 2>&1 &
Press Enter, then switch to terminal 2 (still in ~/bigdata/apache-storm-1.2.2) and run:
nohup bin/storm ui 2>&1 &
Press Enter.
Start the Supervisor on slave1 and slave2:
cd ~/bigdata/apache-storm-1.2.2
nohup bin/storm supervisor 2>&1 &
3. Run the Storm Job
a. Submit the Storm job:
~/bigdata/apache-storm-1.2.2/bin/storm jar /home/hadoop-sny/jar/stormlearning-1.0-SNAPSHOT-jar-with-dependencies.jar com.shaonaiyi.kafka.WordCountKafkaTopology 1
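The trailing 1 is read as args[0] in WordCountKafkaTopology and passed to config.setNumWorkers, so increase it to run the topology with more worker processes.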
b. Check the Web UI (master:8080)
4. Verification Process
a. Current processes on each node
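(The original documents this with screenshots.) Roughly, jps should report nimbus, core (the Storm UI), QuorumPeerMain, and Kafka on master, and Supervisor, QuorumPeerMain, and Kafka on slave1/slave2, plus worker/LogWriter processes on the slaves once the job is running; these process names are from Storm 1.x and may differ across versions.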
b. Send messages to Kafka
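For example, with the console producer that ships with Kafka (adjust $KAFKA_HOME to your install; --broker-list is the pre-Kafka-2.5 flag), then type a few sentences such as hello storm hello kafka:
$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list master:9092 --topic word-count-input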
c. Check the consumer output
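For example (print.key=true makes the console consumer show the word, which is the Kafka record key, next to its count):
$KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server master:9092 --topic word-count-output --from-beginning --property print.key=true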
d. Check Storm's Web UI
0xFF Summary
- If you send a few more sentences from the producer side, you will notice that these results are not quite what we really want: the count bolt emits a new (word, count) record for every incoming tuple, so the output topic accumulates a stream of intermediate counts rather than the single final count per word that our earlier WordCount examples displayed. How to achieve that presentation is covered in a later tutorial.
- This tutorial covers a lot of ground, so please work through the steps carefully.