1. Core Class Responsibilities
- MySqlSourceEnumerator: runs with a parallelism of 1 on the JobManager; it generates the splits and assigns them to the SourceReaders.
- MySqlSourceReader: runs in parallel inside the SourceOperators on the TaskManagers; it requests splits from the MySqlSourceEnumerator and processes them, e.g. by reading the file or log partition a split represents.
- MySqlSplit (split): a wrapper around one portion of the source data, such as a file or a log partition. Splits are the basic unit of work assignment and of parallel reading in a source.
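These three classes are wired together through Flink's Source interface (FLIP-27), which MySqlSource implements with SplitT = MySqlSplit and EnumChkT = PendingSplitsState (see sections 3 and 4). A simplified sketch of that interface, with the serializer methods and all bodies omitted:

public interface Source<T, SplitT extends SourceSplit, EnumChkT> extends Serializable {
    // whether the source is bounded (batch) or continuous (streaming)
    Boundedness getBoundedness();

    // called on every TaskManager subtask: creates the reader that processes assigned splits
    SourceReader<T, SplitT> createReader(SourceReaderContext readerContext) throws Exception;

    // called once on the JobManager: creates the enumerator that discovers and assigns splits
    SplitEnumerator<SplitT, EnumChkT> createEnumerator(SplitEnumeratorContext<SplitT> enumContext)
            throws Exception;

    // restores the enumerator from its checkpointed state after a failover
    SplitEnumerator<SplitT, EnumChkT> restoreEnumerator(
            SplitEnumeratorContext<SplitT> enumContext, EnumChkT checkpoint) throws Exception;

    // serializers for splits and for the enumerator checkpoint state (omitted here)
}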
2. CDC Sync Example
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.catalog.ObjectPath;
import java.time.Duration;
import java.util.Properties;
public class MysqlCDC {
    public static void main(String[] args) throws Exception {
        Properties debeziumProperties = new Properties();
        // documented values for decimal.handling.mode are "precise", "double" and "string"
        debeziumProperties.put("decimal.handling.mode", "string");

        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("master1")
                .port(3306)
                .scanNewlyAddedTableEnabled(true) // capture tables added after the job started
                .databaseList("test") // set captured database
                .tableList("test.orders") // set captured table
                .username("root")
                .password("123456")
                .serverTimeZone("UTC")
                .serverId("20-40") // the server id range should be at least as large as the source parallelism
                .includeSchemaChanges(true)
                .heartbeatInterval(Duration.ofSeconds(30))
                .startupOptions(StartupOptions.initial()) // full snapshot first, then binlog
                .chunkKeyColumn(new ObjectPath("test", "orders"), "order_id")
                .debeziumProperties(debeziumProperties)
                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
                .build();

        Configuration configuration = new Configuration();
        configuration.setInteger(RestOptions.PORT, 8081);
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration);

        // enable checkpointing
        env.enableCheckpointing(3000);
        // store checkpoints on the local filesystem
        env.getCheckpointConfig().setCheckpointStorage("file:///tmp/flink-ck");

        env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
                // set 4 parallel source tasks
                .setParallelism(4)
                .print().setParallelism(1); // use parallelism 1 for sink to keep message ordering

        env.execute("Print MySQL Snapshot + Binlog");
    }
}
The example mainly builds a MySqlSource object and starts the Flink job. MySqlSource implements the Source interface: when the job starts, the JobManager calls createEnumerator to create the MySqlSourceEnumerator and invokes its start method, while each TaskManager calls createReader to create a MySqlSourceReader and invokes its start method.
3. MySqlSourceEnumerator
Creating the MySqlSourceEnumerator object:
@Override
public SplitEnumerator<MySqlSplit, PendingSplitsState> createEnumerator(
        SplitEnumeratorContext<MySqlSplit> enumContext) {
    MySqlSourceConfig sourceConfig = configFactory.createConfig(0, ENUMERATOR_SERVER_NAME);

    /**
     * MySqlValidator checks that:
     * 1. the MySQL version is 5.6 or later
     * 2. binlog_format is ROW
     * 3. binlog_row_image is FULL
     * 4. the configured server time zone matches the MySQL setting
     */
    final MySqlValidator validator = new MySqlValidator(sourceConfig);
    validator.validate();

    final MySqlSplitAssigner splitAssigner;
    /**
     * If startupOptions is set to INITIAL, the full snapshot is synchronized first and the
     * incremental binlog is read afterwards. The other StartupMode values are
     * (see the sketch after this method):
     * 1. StartupMode.LATEST_OFFSET: binlog only, start reading from the latest offset.
     * 2. StartupMode.EARLIEST_OFFSET: binlog only, start reading from the earliest available offset.
     * 3. StartupMode.SPECIFIC_OFFSET: binlog only, start reading from a specified offset.
     * 4. StartupMode.TIMESTAMP: start reading from the given timestamp.
     */
    if (sourceConfig.getStartupOptions().startupMode == StartupMode.INITIAL) {
        try (JdbcConnection jdbc = openJdbcConnection(sourceConfig)) {
            boolean isTableIdCaseSensitive = DebeziumUtils.isTableIdCaseSensitive(jdbc);
            // MySqlHybridSplitAssigner: hybrid mode, full snapshot first, then incremental binlog
            splitAssigner =
                    new MySqlHybridSplitAssigner(
                            sourceConfig,
                            enumContext.currentParallelism(),
                            new ArrayList<>(),
                            isTableIdCaseSensitive);
        } catch (Exception e) {
            throw new FlinkRuntimeException(
                    "Failed to discover captured tables for enumerator", e);
        }
    } else {
        // MySqlBinlogSplitAssigner: incremental binlog only
        splitAssigner = new MySqlBinlogSplitAssigner(sourceConfig);
    }
    return new MySqlSourceEnumerator(enumContext, sourceConfig, splitAssigner);
}
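For reference, a sketch of how the startup modes listed in the comment above are selected on the builder from section 2. The initial/earliest/latest factory methods are the ones commonly used; the specificOffset and timestamp variants below are shown with placeholder values, and their availability and exact signatures depend on the flink-cdc version:

StartupOptions.initial();                                // snapshot first, then binlog (used in the example above)
StartupOptions.latest();                                 // binlog only, from the latest offset
StartupOptions.earliest();                               // binlog only, from the earliest retained offset
StartupOptions.specificOffset("mysql-bin.000003", 4L);   // binlog only, from a specific file/position (placeholders)
StartupOptions.timestamp(1667232000000L);                // binlog only, from an epoch timestamp in millis (placeholder)

Each of these is passed to the builder via .startupOptions(...), exactly like StartupOptions.initial() in section 2.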
4. MySqlSourceReader
@Override
public SourceReader<T, MySqlSplit> createReader(SourceReaderContext readerContext)
        throws Exception {
    // create source config for the given subtask (e.g. unique server id)
    MySqlSourceConfig sourceConfig =
            configFactory.createConfig(readerContext.getIndexOfSubtask());

    // queue that buffers fetched records between the split reader and the record emitter
    FutureCompletingBlockingQueue<RecordsWithSplitIds<SourceRecords>> elementsQueue =
            new FutureCompletingBlockingQueue<>();

    final Method metricGroupMethod = readerContext.getClass().getMethod("metricGroup");
    metricGroupMethod.setAccessible(true);
    final MetricGroup metricGroup = (MetricGroup) metricGroupMethod.invoke(readerContext);

    /**
     * Reader metrics. currentEmitEventTimeLag records the difference between the time a record
     * is emitted to the downstream operator and the time it was produced in the database, i.e.
     * the latency from the DB to leaving the source. It can be used to tell whether the source
     * has entered the binlog reading phase: 0 means the snapshot (full) phase, a non-zero value
     * means the incremental (binlog) phase.
     */
    final MySqlSourceReaderMetrics sourceReaderMetrics =
            new MySqlSourceReaderMetrics(metricGroup);
    sourceReaderMetrics.registerMetrics();

    MySqlSourceReaderContext mySqlSourceReaderContext =
            new MySqlSourceReaderContext(readerContext);

    // supplier that builds a SplitReader for each fetcher
    Supplier<MySqlSplitReader> splitReaderSupplier =
            () ->
                    // each reader gets its own config and the index of its subtask
                    new MySqlSplitReader(
                            sourceConfig,
                            readerContext.getIndexOfSubtask(),
                            mySqlSourceReaderContext);

    // assemble the concrete SourceReader
    return new MySqlSourceReader<>(
            elementsQueue,
            splitReaderSupplier,
            new MySqlRecordEmitter<>(
                    deserializationSchema,
                    sourceReaderMetrics,
                    sourceConfig.isIncludeSchemaChanges()),
            readerContext.getConfiguration(),
            mySqlSourceReaderContext,
            sourceConfig);
}
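The deserializationSchema handed to MySqlRecordEmitter above is whatever was supplied on the builder (JsonDebeziumDeserializationSchema in the example in section 2). As a sketch, a custom implementation of DebeziumDeserializationSchema only has to map a Debezium SourceRecord to the output type; the class name and the chosen output below are hypothetical:

import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.source.SourceRecord;

// Hypothetical example: emit only the record topic ("<serverName>.<database>.<table>" for data change events)
public class TopicOnlyDeserializationSchema implements DebeziumDeserializationSchema<String> {

    @Override
    public void deserialize(SourceRecord record, Collector<String> out) {
        out.collect(record.topic());
    }

    @Override
    public TypeInformation<String> getProducedType() {
        return BasicTypeInfo.STRING_TYPE_INFO;
    }
}

Such a class would replace JsonDebeziumDeserializationSchema on the builder, i.e. .deserializer(new TopicOnlyDeserializationSchema()), and its output type then flows through MySqlRecordEmitter to the downstream operators.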