Kafka Transaction Control

Kafka 0.11.0.0 introduced transactions alongside the idempotent producer.
By default, consumers read at the read_uncommitted isolation level, which means they can see records from transactions that later abort. So once producer-side transactions are enabled, the consumer's isolation level should be set to read_committed.
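For reference, the isolation level is a single consumer property (shown in full context in step 2 below); read_uncommitted is the default:

props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); // default: "read_uncommitted"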

1. Create a producer and enable transaction control

import java.util.Properties;
import java.util.UUID;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaProducerTransactions {

    public static void main(String[] args) {
        KafkaProducer<String, String> producer = buildKafkaProducer();
        // initialize transaction support (registers the transactional.id with the broker)
        producer.initTransactions();
        try {
            producer.beginTransaction();
            for (int i = 0; i < 3; i++) {
                // uncomment to force a failure mid-transaction and test the consumer's isolation level
                /*if (i == 2) {
                    int j = 10 / 0;
                }*/
                ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i + "fufu");
                producer.send(record);
                producer.flush();
            }
            // commit the transaction
            producer.commitTransaction();
        } catch (Exception e) {
            System.err.println("error: " + e.getMessage());
            // abort the transaction so its records are never visible to read_committed consumers
            producer.abortTransaction();
        } finally {
            producer.close();
        }
    }

    public static KafkaProducer<String, String> buildKafkaProducer() {

        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // the transactional ID must be unique per producer instance
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transaction-id" + UUID.randomUUID().toString());

        // batch size in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1024);
        // wait up to 5 ms for a batch to fill before sending
        props.put(ProducerConfig.LINGER_MS_CONFIG, 5);

        // transactions require idempotence and acks=all
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 20000);

        return new KafkaProducer<>(props);
    }
}
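A note on the transactional.id above: because it is generated from UUID.randomUUID(), every run of the program registers as a brand-new transactional producer. That keeps the ID unique, but it also means Kafka's zombie fencing (aborting in-flight transactions from an older producer instance that reused the same transactional.id) never applies across restarts; in production a stable, per-application-instance ID is the usual choice.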
2. Create a consumer and set its transaction isolation level

import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class KafkaConsumerTransCommitted {

    public static void main(String[] args) {
        KafkaConsumer<String, String> consumer = buildKafkaConsumer("g1");

        // subscribe to the relevant topics
        consumer.subscribe(Arrays.asList("topic01"));

        // poll the message queue
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            if (!records.isEmpty()) { // records were fetched from the queue
                Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
                while (iterator.hasNext()) {
                    // read one record
                    ConsumerRecord<String, String> record = iterator.next();
                    String topic = record.topic();
                    int partition = record.partition();
                    long offset = record.offset();

                    String key = record.key();
                    String value = record.value();
                    long timestamp = record.timestamp();

                    System.out.println(topic + "\t" + partition + "," + offset + "\t" + key + " " + value + " " + timestamp);
                }
            }
        }
    }

    public static KafkaConsumer<String, String> buildKafkaConsumer(String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

        // set the consumer's transaction isolation level to read_committed
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        //props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        return new KafkaConsumer<>(props);
    }
}

When a transaction cannot be committed, a consumer at the read_committed level will not see its messages, while a read_uncommitted consumer will.
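Producer-side transactions are most useful in a consume-transform-produce loop, where the consumed offsets are committed inside the same transaction via sendOffsetsToTransaction, so the read and the write succeed or fail together. Below is a minimal sketch of that pattern in the style of steps 1 and 2; the output topic topic02 is an assumption, and note that the consumer must additionally disable enable.auto.commit.

import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaConsumeTransformProduce {

    public static void main(String[] args) {
        KafkaProducer<String, String> producer = buildProducer();
        KafkaConsumer<String, String> consumer = buildConsumer("g2");
        consumer.subscribe(Arrays.asList("topic01"));
        producer.initTransactions();

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            if (records.isEmpty()) continue;
            producer.beginTransaction();
            try {
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (ConsumerRecord<String, String> record : records) {
                    // transform and forward to the (assumed) output topic
                    producer.send(new ProducerRecord<>("topic02", record.key(), record.value().toUpperCase()));
                    // the committed offset is the next record to read, hence offset + 1
                    offsets.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                }
                // commit the consumed offsets as part of the same transaction
                producer.sendOffsetsToTransaction(offsets, "g2");
                producer.commitTransaction();
            } catch (Exception e) {
                // on failure, neither the output records nor the offset commits become visible
                producer.abortTransaction();
            }
        }
    }

    private static KafkaProducer<String, String> buildProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // a stable ID here, so a restart fences off any zombie instance
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "ctp-transaction-id");
        return new KafkaProducer<>(props);
    }

    private static KafkaConsumer<String, String> buildConsumer(String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        // offsets are committed through the transaction, not auto-committed
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return new KafkaConsumer<>(props);
    }
}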
