01
Basic Concepts of __consumer_offsets
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class ConsumerOffsetDemo {

    private final KafkaConsumer<String, String> consumer;
    private final String topic;
    private final String groupId;

    public ConsumerOffsetDemo(String bootstrapServers, String topic, String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Disable auto-commit so offsets are committed manually
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        this.consumer = new KafkaConsumer<>(props);
        this.topic = topic;
        this.groupId = groupId;
    }

    public void consumeAndCommit() {
        try {
            consumer.subscribe(Collections.singletonList(topic));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    // Process the message
                    processRecord(record);
                    // Manually commit the offset of this single record;
                    // the committed value is the next offset to consume, hence offset + 1
                    Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(
                            new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                    consumer.commitSync(offsets);
                }
            }
        } finally {
            consumer.close();
        }
    }

    private void processRecord(ConsumerRecord<String, String> record) {
        // Application-specific processing; print the record as a placeholder
        System.out.printf("partition=%d, offset=%d, value=%s%n",
                record.partition(), record.offset(), record.value());
    }
}
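A minimal sketch of how this class might be driven, assuming a local broker at localhost:9092 and placeholder topic/group names that do not appear in the original:

public class ConsumerOffsetDemoMain {
    public static void main(String[] args) {
        // Placeholder cluster address, topic, and group; replace with your own values.
        ConsumerOffsetDemo demo = new ConsumerOffsetDemo("localhost:9092", "demo-topic", "demo-group");
        // Blocks and polls forever; each record is committed synchronously right after processing.
        demo.consumeAndCommit();
    }
}

Committing after every record keeps the position stored in __consumer_offsets as tight as possible, but each commitSync is a blocking round trip to the group coordinator; committing once per poll batch is the more common trade-off.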
02
The Offset Commit Mechanism
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class OffsetMonitor {

    private final AdminClient adminClient;
    private final KafkaConsumer<byte[], byte[]> consumer;

    public OffsetMonitor(String bootstrapServers) {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        this.adminClient = AdminClient.create(props);
        // Reuse the same bootstrap.servers entry for a consumer that only reads end offsets
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-monitor");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        this.consumer = new KafkaConsumer<>(props);
    }

    public Map<String, ConsumerGroupOffset> getConsumerGroupOffsets(String groupId) {
        Map<String, ConsumerGroupOffset> result = new HashMap<>();
        try {
            // Fetch the group's committed offsets
            ListConsumerGroupOffsetsResult offsetsResult = adminClient.listConsumerGroupOffsets(groupId);
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    offsetsResult.partitionsToOffsetAndMetadata().get();
            // Fetch the log end offsets of those partitions
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(offsets.keySet());
            // Compute the consumer lag per partition
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
                TopicPartition tp = entry.getKey();
                long committedOffset = entry.getValue().offset();
                long endOffset = endOffsets.get(tp);
                long lag = endOffset - committedOffset;
                // Key by "topic-partition" so partitions of the same topic do not overwrite each other
                result.put(tp.toString(), new ConsumerGroupOffset(committedOffset, endOffset, lag));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }
}
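The ConsumerGroupOffset value object used above is not shown in the original article; a minimal sketch matching the constructor call (field names are assumptions) could look like this:

// Hypothetical value object for the monitor above; the original article does not show it.
public class ConsumerGroupOffset {

    private final long committedOffset; // last offset committed by the group
    private final long endOffset;       // current log end offset of the partition
    private final long lag;             // endOffset - committedOffset

    public ConsumerGroupOffset(long committedOffset, long endOffset, long lag) {
        this.committedOffset = committedOffset;
        this.endOffset = endOffset;
        this.lag = lag;
    }

    public long getCommittedOffset() { return committedOffset; }
    public long getEndOffset()       { return endOffset; }
    public long getLag()             { return lag; }

    @Override
    public String toString() {
        return "committed=" + committedOffset + ", end=" + endOffset + ", lag=" + lag;
    }
}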
03
Offset Management and Operations
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class OffsetManager {

    private final AdminClient adminClient;
    private final String bootstrapServers;

    public OffsetManager(String bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        this.adminClient = AdminClient.create(props);
    }

    // Reset the committed offset of a consumer group for one partition
    public void resetOffset(String groupId, String topic, int partition, long offset) {
        try {
            TopicPartition tp = new TopicPartition(topic, partition);
            Map<TopicPartition, OffsetAndMetadata> offsetMap =
                    Collections.singletonMap(tp, new OffsetAndMetadata(offset));
            adminClient.alterConsumerGroupOffsets(groupId, offsetMap).all().get();
            System.out.printf("Successfully reset offset for group=%s, topic=%s, partition=%d to %d%n",
                    groupId, topic, partition, offset);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Delete a consumer group
    public void deleteConsumerGroup(String groupId) {
        try {
            adminClient.deleteConsumerGroups(Collections.singleton(groupId)).all().get();
            System.out.printf("Successfully deleted consumer group: %s%n", groupId);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Monitor consumer lag (this example checks partition 0 of the topic only)
    public void monitorConsumerLag(String groupId, String topic) {
        try {
            TopicPartition tp = new TopicPartition(topic, 0);
            Map<TopicPartition, OffsetAndMetadata> offsetMap =
                    adminClient.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
            long currentOffset = offsetMap.get(tp).offset();
            long endOffset = getEndOffset(tp);
            long lag = endOffset - currentOffset;
            if (lag > 10000) { // alert threshold
                System.out.printf("Warning: High lag detected for group=%s, topic=%s: %d%n",
                        groupId, topic, lag);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private long getEndOffset(TopicPartition tp) {
        // A short-lived consumer (no subscription needed) used only to read the log end offset
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            return consumer.endOffsets(Collections.singleton(tp)).get(tp);
        }
    }
}
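A minimal usage sketch for the class above, again with a placeholder broker address and group/topic names that do not appear in the original:

public class OffsetManagerMain {
    public static void main(String[] args) {
        // Placeholder cluster address, group, and topic names.
        OffsetManager manager = new OffsetManager("localhost:9092");

        // Rewind partition 0 of the topic to offset 0 so the group replays it from the beginning.
        manager.resetOffset("demo-group", "demo-topic", 0, 0L);

        // Print a warning if the lag on partition 0 exceeds the 10000-record threshold above.
        manager.monitorConsumerLag("demo-group", "demo-topic");

        // Deleting a group is destructive; uncomment only when the group is no longer needed.
        // manager.deleteConsumerGroup("demo-group");
    }
}

Note that alterConsumerGroupOffsets and deleteConsumerGroups only succeed when the group has no active members, so stop the group's consumers before resetting offsets or deleting the group.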