Blink Stream Processing: Kafka Integration Demo

Summary: This demo shows how to consume Kafka messages in Blink SQL. A UDTF (MyKafkaUDTF) parses the binary message payload into columns, a scalar UDF (MyWaterMarkUDTF) extracts the event time used for the watermark, the parsed stream is aggregated over a sliding window, and the results are written to a print sink and back to Kafka.

-- Define the UDTF that parses the Kafka message, and the UDF that extracts the event time.
CREATE FUNCTION myParse AS 'com.xxxxxx.MyKafkaUDTF';
CREATE FUNCTION myUdf AS 'com.xxxxxxx.MyWaterMarkUDTF';

-- Note: the fields of the Kafka source table DDL must match the example below exactly.
create table my_input (
    messageKey VARBINARY,
    message VARBINARY,
    topic varchar,
    partition int,
    offset bigint,
    ctTime AS TO_TIMESTAMP(myUdf(message)),
    -- Note: the computed column must be of type TIMESTAMP, otherwise a watermark cannot be defined on it.
    WATERMARK wk FOR ctTime AS WITHOFFSET(ctTime, 2000) -- define the watermark on the rowtime column
) WITH (
    type = 'KAFKA08',
    topic = 'myTopic',
    group.id = 'mGroup',
    extraConfig = 'bootstrap.servers=127.0.0.1:9092',
    zookeeper.connect = '127.0.0.1:2181',
    startupMode = 'EARLIEST'
);

-- Sliding (HOP) window aggregation, grouped by prodId
CREATE VIEW input_view01 (
    windowStart,
    windowEnd,
    prodId,
    prodName,
    prodNumber
) AS
SELECT
    HOP_START(S.ctTime, INTERVAL '30' SECOND, INTERVAL '2' MINUTE),
    HOP_END(S.ctTime, INTERVAL '30' SECOND, INTERVAL '2' MINUTE),
    T.prodId as prodId,
    T.prodName as prodName,
    count(*) as prodNumber
from
    my_input as S,
    LATERAL TABLE(myParse(message)) as T (
        id,
        prodId,
        prodName,
        createdAt,
        updatedAt
    )
GROUP BY HOP(S.ctTime, INTERVAL '30' SECOND, INTERVAL '2' MINUTE), T.prodId, T.prodName;
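
If a non-overlapping tumbling window is preferred to the overlapping HOP window above, the same aggregation can be expressed with TUMBLE. The following is only a minimal sketch (2-minute tumble; the view name input_view02 is hypothetical, not part of the original script):

-- Sketch: tumbling-window variant of the aggregation above
CREATE VIEW input_view02 (
    windowStart,
    windowEnd,
    prodId,
    prodName,
    prodNumber
) AS
SELECT
    TUMBLE_START(S.ctTime, INTERVAL '2' MINUTE),
    TUMBLE_END(S.ctTime, INTERVAL '2' MINUTE),
    T.prodId as prodId,
    T.prodName as prodName,
    count(*) as prodNumber
from
    my_input as S,
    LATERAL TABLE(myParse(message)) as T (id, prodId, prodName, createdAt, updatedAt)
GROUP BY TUMBLE(S.ctTime, INTERVAL '2' MINUTE), T.prodId, T.prodName;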

-- Pass-through view exposing the parsed records without windowing
CREATE VIEW input_view60 (
    id,
    prodId,
    prodName,
    createdAt,
    updatedAt
) AS
SELECT
    T.id,
    T.prodId,
    T.prodName,
    T.createdAt,
    T.updatedAt
from
    my_input as S,
    LATERAL TABLE(myParse(message)) as T (
        id,
        prodId,
        prodName,
        createdAt,
        updatedAt
    );
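
input_view60 is defined but not consumed by any statement in this script. A minimal sketch of printing it for debugging follows; the sink name outprint60 is hypothetical, and the column types mirror the UDTF result type declared in MyKafkaUDTF below:

-- Sketch: print the per-record view for debugging
create table outprint60 (
    id bigint,
    prodId bigint,
    prodName varchar,
    createdAt timestamp,
    updatedAt timestamp
) with (
    type = 'print'
);

insert into outprint60
select id, prodId, prodName, createdAt, updatedAt
from input_view60;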

-- Print the aggregated results
create table outprint01 (
    prodId bigint,
    prodName varchar,
    prodNumber bigint
) with (
    type = 'print'
);

insert into outprint01
select prodId, prodName, prodNumber
from input_view01;

-- Write the computed results to Kafka
create table result_kafka (
    messageKey VARBINARY,
    message VARBINARY,
    PRIMARY KEY (messageKey)
) with (
    type = 'KAFKA08',
    topic = 'myResultTopic',
    extraConfig = 'bootstrap.servers=127.0.0.1:9092',
    zookeeper.connect = '127.0.0.1:2181',
    startupMode = 'EARLIEST'
);

-- For the output here, consider assembling the result fields into one string separated by '|',
-- and letting the consumer parse it back; see the sketch after this statement.
INSERT INTO
    result_kafka
SELECT
    cast(prodId as VARBINARY) as messageKey,
    cast(prodName as VARBINARY) as message
FROM
    input_view01;
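
A minimal sketch of the pipe-delimited assembly suggested in the comment above, assuming the built-in CONCAT_WS string function of Blink/Flink SQL; the consumer would split the message on '|':

-- Sketch: pack prodId|prodName|prodNumber into a single message
INSERT INTO result_kafka
SELECT
    cast(prodId as VARBINARY) as messageKey,
    cast(CONCAT_WS('|',
        cast(prodId as VARCHAR),
        prodName,
        cast(prodNumber as VARCHAR)) as VARBINARY) as message
FROM input_view01;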

MyKafkaUDTF implementation:

package com.xxxxxxxx;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.DataTypes;
import org.apache.flink.types.Row;

import java.io.UnsupportedEncodingException;
import java.sql.Timestamp;

public class MyKafkaUDTF extends TableFunction<Row> {

    public void eval(byte[] message) {
        try {
            String msg = new String(message, "UTF-8");
            System.out.println("Received message: " + msg);
            try {
                JSONObject jsonObject = JSONObject.parseObject(msg);
                if (jsonObject != null) {
                    Long id = jsonObject.getLong("id");
                    Long prodId = jsonObject.getLong("prodId");
                    String prodName = jsonObject.getString("prodName");
                    Long createdAt = jsonObject.getLong("createdAt");
                    Long updatedAt = jsonObject.getLong("updatedAt");

                    // Convert the epoch-millisecond fields to timestamps.
                    Timestamp createdAtTimestamp = new Timestamp(createdAt);
                    Timestamp updatedAtTimestamp = new Timestamp(updatedAt);

                    // The row arity must match the 5 fields declared in getResultType.
                    Row row = new Row(5);
                    row.setField(0, id);
                    row.setField(1, prodId);
                    row.setField(2, prodName);
                    row.setField(3, createdAtTimestamp);
                    row.setField(4, updatedAtTimestamp);
                    System.out.println("message str ==> " + row.toString());
                    collect(row);
                }
            } catch (Exception e) {
                e.printStackTrace();
                System.out.println("Error: input data " + msg + " is not a JSON string");
            }
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
    }

    // If the return value is a Row, this method must be overridden to tell the
    // framework the field types of the returned row explicitly.
    @Override
    public DataType getResultType(Object[] arguments, Class[] argTypes) {
        return DataTypes.createRowType(
                DataTypes.LONG,
                DataTypes.LONG,
                DataTypes.STRING,
                DataTypes.TIMESTAMP,
                DataTypes.TIMESTAMP);
    }
}

MyWaterMarkUDTF implementation:

package com.xxxxxxx;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.table.functions.ScalarFunction;

import java.text.SimpleDateFormat;
import java.util.Date;

public class MyWaterMarkUDTF extends ScalarFunction {

    public String eval(byte[] message) {
        try {
            String msg = new String(message, "UTF-8");
            JSONObject data = JSONObject.parseObject(msg);
            System.out.println("time: " + data.getString("createdAt"));
            // Format the epoch-millisecond createdAt field as a timestamp string
            // that TO_TIMESTAMP can parse.
            Long createdAt = data.getLong("createdAt");
            SimpleDateFormat parser = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            return parser.format(new Date(createdAt));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    // Optional: the close() method can be omitted.
    @Override
    public void close() {
    }
}
