package com.ctgu.flink.project;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;
import java.util.Random;
public class Flink_Sql_Pv {
public static void main(String[] args) throws Exception {
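        // Hourly page-view (pv) count over UserBehavior.csv. To avoid data skew,
        // counting runs in two stages: scatter records onto random keys and
        // pre-aggregate per window, then merge the partial counts per window end.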
long start = System.currentTimeMillis();
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(8);
EnvironmentSettings settings = EnvironmentSettings
.newInstance()
.inStreamingMode()
.build();
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
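        // Register data/UserBehavior.csv as a source table via the filesystem connector.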
String createSql =
"CREATE TABLE source " +
" (" +
" `userId` BIGINT," +
" `itemId` BIGINT," +
" `categoryId` INT," +
" `behavior` STRING," +
" `ts` BIGINT" +
" )" +
" WITH (" +
" 'connector'='filesystem'," +
" 'format'='csv'," +
" 'csv.field-delimiter'=','," +
" 'path'='data/UserBehavior.csv'" +
" )";
tableEnv.executeSql(createSql);
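        // Keep only page-view events and convert the epoch-second ts to milliseconds.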
String userBehavior = "select *, ts * 1000 as `timestamp` from source where behavior = 'pv'";
Table userBehaviorTable = tableEnv.sqlQuery(userBehavior);
DataStream<Row> rowDataStream = tableEnv.toDataStream(userBehaviorTable);
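        // Re-attach the stream as a table with an event-time column derived from
        // `timestamp` and a watermark tolerating 5 seconds of out-of-orderness.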
Table source =
tableEnv.fromDataStream(
rowDataStream,
Schema.newBuilder()
.columnByExpression("time_ltz", "TO_TIMESTAMP_LTZ(`timestamp`, 3)")
.watermark("time_ltz", "time_ltz - INTERVAL '5' SECOND")
.build());
        tableEnv.createTemporaryView("userBehavior", source); // registered for ad-hoc SQL; not queried again below
DataStream<Row> dataStream = tableEnv.toDataStream(source);
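        // Two-stage skew-safe count:
        //   stage 1 - scatter records onto random keys and pre-aggregate per
        //             hourly window, so no single subtask sees the whole load;
        //   stage 2 - re-key by window end and merge the partial counts.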
DataStream<Tuple2<Long, Long>> sum = dataStream.filter(data -> "pv".equals(data.getField("behavior")))
.map(new MyMapFunction())
.keyBy(data -> data.f0)
.window(TumblingEventTimeWindows.of(Time.hours(1)))
                .aggregate(new CountAggregate(), new MyWindowFunction())
.keyBy(data -> data.f0)
.process(new MyProcessFunction());
sum.print();
env.execute("Table SQL");
System.out.println("耗时: " + (System.currentTimeMillis() - start) / 1000);
}
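    // Stage 1 mapper: replaces the real key with a random key in [0, 10), so a hot
    // key is spread across parallel subtasks; each record carries a count of 1.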
    private static class MyMapFunction
            extends RichMapFunction<Row, Tuple2<Integer, Long>> {
        private transient Random random;

        @Override
        public void open(Configuration parameters) throws Exception {
            // Create the Random once per task rather than once per record.
            random = new Random();
        }

        @Override
        public Tuple2<Integer, Long> map(Row row) throws Exception {
            return new Tuple2<>(random.nextInt(10), 1L);
        }
    }
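    // Incremental counter for stage 1: the accumulator is the running count of
    // records for one (random key, window) pair.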
    private static class CountAggregate
            implements AggregateFunction<Tuple2<Integer, Long>, Long, Long> {
        @Override
        public Long createAccumulator() {
            return 0L;
        }

        @Override
        public Long add(Tuple2<Integer, Long> value, Long accumulator) {
            return accumulator + value.f1;
        }

        @Override
        public Long getResult(Long accumulator) {
            return accumulator;
        }

        @Override
        public Long merge(Long a, Long b) {
            return a + b;
        }
    }
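    // Wraps each partial count with its window end so stage 2 can group the
    // partial results that belong to the same hourly window.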
private static class MyWindowFunction
implements WindowFunction<Long, Tuple2<Long, Long>, Integer, TimeWindow> {
@Override
public void apply(Integer integer,
TimeWindow timeWindow,
Iterable<Long> iterable,
Collector<Tuple2<Long, Long>> collector) throws Exception {
collector.collect(new Tuple2<>(timeWindow.getEnd(), iterable.iterator().next()));
}
}
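    // Stage 2: keyed by window end; sums the partial counts from all random keys
    // and emits the total when a timer fires at windowEnd + 1.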
    private static class MyProcessFunction
            extends KeyedProcessFunction<Long, Tuple2<Long, Long>, Tuple2<Long, Long>> {
        private ValueState<Long> totalCountState;

        @Override
        public void open(Configuration parameters) throws Exception {
            totalCountState = getRuntimeContext().getState(
                    new ValueStateDescriptor<>("total-count", Long.class));
        }

        @Override
        public void processElement(Tuple2<Long, Long> value,
                                   Context context,
                                   Collector<Tuple2<Long, Long>> collector) throws Exception {
            // Add this random key's partial count to the window's running total.
            Long current = totalCountState.value();
            totalCountState.update((current == null ? 0L : current) + value.f1);
            // Fire once the watermark passes the window end, i.e. after all
            // partial results for this window have arrived.
            context.timerService().registerEventTimeTimer(value.f0 + 1);
        }
@Override
public void onTimer(long timestamp, OnTimerContext ctx, Collector<Tuple2<Long, Long>> out) throws Exception {
Long totalCount = totalCountState.value();
out.collect(new Tuple2<>(ctx.getCurrentKey(), totalCount));
totalCountState.clear();
}
}
}
Flink-5. Flink: fixing data skew with random keys
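The job above is the random-key (salting) fix for data skew: one hot key is split across ten synthetic keys, pre-aggregated in parallel, and the partial results are merged in a second keyed step. As a minimal, self-contained sketch of the same two-stage idea in plain Java (the class name SaltedCountSketch and the bucket count of 10 are illustrative, not part of the job above):

import java.util.HashMap;
import java.util.Map;
import java.util.Random;

public class SaltedCountSketch {
    public static void main(String[] args) {
        Random random = new Random();
        // Stage 1: one logical hot key is scattered across 10 salt buckets,
        // each bucket keeping its own partial count.
        Map<Integer, Long> partialCounts = new HashMap<>();
        for (int i = 0; i < 1_000_000; i++) {
            partialCounts.merge(random.nextInt(10), 1L, Long::sum);
        }
        // Stage 2: folding the partial counts back together restores the
        // exact total for the original key.
        long total = partialCounts.values().stream().mapToLong(Long::longValue).sum();
        System.out.println("buckets=" + partialCounts + " total=" + total);
    }
}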