Flink-1.13.0 Table Api & SQL Java Demo4(表转流输出)

1、导入依赖

      <!-- 使用table api 引入的依赖,使用桥接器和底层datastream api连接支持-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!--如果需要在本地运行table api和sql 还需要引入以下依赖-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!--如果想实现自定义的数据格式来做序列化,需要引入以下依赖-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-common</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!--连接外部数据格式解析,采用csv方式来解析-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-csv</artifactId>
            <version>${flink.version}</version>
        </dependency>

1.1、从文件中输入

路径:input/clicks.txt

Bob,./test/111,1000
Bob,./test/222,1000
Bob,./test/333,1000
Bob,./test/444,1000

2、表转流输出

package com.flinktest.wc;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo: run a grouped aggregation with Flink SQL and emit the result as a
 * changelog stream. Reads CSV click records from {@code input/clicks.txt},
 * counts clicks per user, and prints the updating result to stdout.
 */
public class CommApiTest3 {
    public static void main(String[] args) throws Exception {
        // Two ways to create an execution environment: stream-based or table-based.
        // 1. Stream-based: build a StreamExecutionEnvironment and bridge a
        //    StreamTableEnvironment on top of it (needed for toChangelogStream).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps the printed output in a single, readable sequence.
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Table-based alternative (Alibaba blink planner), kept for reference:
        //        EnvironmentSettings settings = EnvironmentSettings.newInstance()
        //                .inStreamingMode()
        //                .useBlinkPlanner()
        //                .build();
        //        TableEnvironment tableEnv = TableEnvironment.create(settings);

        // 3. Register a connector (source) table that parses the file as CSV.
        String createInDDL = "CREATE TABLE clickTable (" +
                "user_name STRING, " +
                "url STRING, " +
                "ts BIGINT " +
                ") WITH (" +
                " 'connector' = 'filesystem'," +
                " 'path' = 'input/clicks.txt'," +
                " 'format' = 'csv'" +
                ")";
        tableEnv.executeSql(createInDDL);

        // Run the aggregation query: clicks per user.
        // (Renamed from "eggResult"/"egg" — typo for "agg", short for aggregate.)
        Table aggResult = tableEnv.sqlQuery("select user_name,COUNT(url) as cnt from clickTable group by user_name");

        // A grouped aggregation produces an updating (dynamic) table, so it
        // cannot be converted with toDataStream; a changelog stream is required
        // to carry the -U/+U retraction/update rows.
        tableEnv.toChangelogStream(aggResult).print("agg");
        env.execute();
    }
}

最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

推荐阅读更多精彩内容