KAFKA-14691; Add TopicId to OffsetFetch API #19515

Merged · 24 commits · May 13, 2025
Changes shown are from 12 of the 24 commits.
OffsetFetchRequest.java
@@ -17,6 +17,7 @@
package org.apache.kafka.common.requests;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.OffsetFetchRequestData;
import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup;
@@ -71,7 +72,8 @@ public Builder(String groupId,
boolean requireStable,
List<TopicPartition> partitions,
boolean throwOnFetchStableOffsetsUnsupported) {
super(ApiKeys.OFFSET_FETCH);
// It can only be used with topic names.
super(ApiKeys.OFFSET_FETCH, ApiKeys.OFFSET_FETCH.oldestVersion(), (short) 9);

OffsetFetchRequestData.OffsetFetchRequestGroup group =
new OffsetFetchRequestData.OffsetFetchRequestGroup()
@@ -103,7 +105,8 @@ public Builder(String groupId,
public Builder(Map<String, List<TopicPartition>> groupIdToTopicPartitionMap,
boolean requireStable,
boolean throwOnFetchStableOffsetsUnsupported) {
super(ApiKeys.OFFSET_FETCH);
// It can only be used with topic names.
super(ApiKeys.OFFSET_FETCH, ApiKeys.OFFSET_FETCH.oldestVersion(), (short) 9);

List<OffsetFetchRequestGroup> groups = new ArrayList<>();
for (Entry<String, List<TopicPartition>> entry : groupIdToTopicPartitionMap.entrySet()) {
@@ -134,6 +137,13 @@ public Builder(Map<String, List<TopicPartition>> groupIdToTopicPartitionMap,
this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported;
}

public Builder(OffsetFetchRequestData data, boolean throwOnFetchStableOffsetsUnsupported) {
super(ApiKeys.OFFSET_FETCH);
this.data = data;
this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported;
}

@SuppressWarnings("CyclomaticComplexity")
@Override
public OffsetFetchRequest build(short version) {
if (data.groups().size() > 1 && version < 8) {
@@ -151,6 +161,29 @@ public OffsetFetchRequest build(short version) {
data.setRequireStable(false);
}
}
if (version >= 10) {
data.groups().forEach(group -> {
if (group.topics() != null) {
group.topics().forEach(topic -> {
if (topic.topicId() == null || topic.topicId().equals(Uuid.ZERO_UUID)) {
throw new UnsupportedVersionException("The broker offset fetch api version " +
version + " does require usage of topic ids.");
}
});
}
});
} else {
data.groups().forEach(group -> {
if (group.topics() != null) {
group.topics().forEach(topic -> {
if (topic.name() == null || topic.name().isEmpty()) {
throw new UnsupportedVersionException("The broker offset fetch api version " +
version + " does require usage of topic names.");
}
});
}
});
}
// convert data to use the appropriate version since version 8 uses different format
if (version < 8) {
OffsetFetchRequestData normalizedData;
@@ -350,4 +383,8 @@ public boolean isAllPartitionsForGroup(String groupId) {
public OffsetFetchRequestData data() {
return data;
}

public static boolean useTopicIds(short version) {
return version >= 10;
}
}
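
Aside (not part of the patch): a minimal sketch of how the new `Builder(OffsetFetchRequestData, boolean)` constructor and the version gate in `build(short)` interact. The builder, data classes, and setters are the ones shown above; the driver code, group id, and topic id are illustrative only.

```java
import java.util.List;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.OffsetFetchRequestData;
import org.apache.kafka.common.requests.OffsetFetchRequest;

public class OffsetFetchVersionGateSketch {
    public static void main(String[] args) {
        Uuid fooId = Uuid.randomUuid(); // stand-in for the real id of topic "foo"

        OffsetFetchRequestData data = new OffsetFetchRequestData()
            .setGroups(List.of(
                new OffsetFetchRequestData.OffsetFetchRequestGroup()
                    .setGroupId("grp")
                    .setTopics(List.of(
                        new OffsetFetchRequestData.OffsetFetchRequestTopics()
                            .setName("foo")       // used by versions 8-9
                            .setTopicId(fooId)    // required by versions 10+
                            .setPartitionIndexes(List.of(0, 1, 2))))));

        OffsetFetchRequest.Builder builder = new OffsetFetchRequest.Builder(data, false);

        // With both the name and the id populated, either build succeeds. Leaving the
        // id at Uuid.ZERO_UUID makes build((short) 10) throw UnsupportedVersionException,
        // and leaving the name empty does the same for versions below 10.
        OffsetFetchRequest v9 = builder.build((short) 9);
        OffsetFetchRequest v10 = builder.build((short) 10);
    }
}
```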
OffsetFetchRequest.json
@@ -38,8 +38,11 @@
//
// Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). It adds
// the MemberId and MemberEpoch fields. Those are filled in and validated when the new consumer protocol is used.
"validVersions": "1-9",
//
// Version 10 adds support for topic ids and removes support for topic names (KIP-848).
"validVersions": "1-10",
"flexibleVersions": "6+",
"latestVersionUnstable": true,
"fields": [
{ "name": "GroupId", "type": "string", "versions": "0-7", "entityType": "groupId",
"about": "The group to fetch offsets for." },
@@ -60,8 +63,10 @@
"about": "The member epoch if using the new consumer protocol (KIP-848)." },
{ "name": "Topics", "type": "[]OffsetFetchRequestTopics", "versions": "8+", "nullableVersions": "8+",
"about": "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.", "fields": [
{ "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName",
{ "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true,
"about": "The topic name."},
{ "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true,
"about": "The topic ID." },
{ "name": "PartitionIndexes", "type": "[]int32", "versions": "8+",
"about": "The partition indexes we would like to fetch offsets for." }
]}
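Aside (not part of the patch): because both `Name` (versions 8-9) and `TopicId` (versions 10+) are marked `"ignorable": true`, one `OffsetFetchRequestData` can carry both and the serializer simply drops whichever field the negotiated version does not support. The sketch below only shows the generated defaults that the checks in `OffsetFetchRequest.Builder` and `KafkaApis` test against; it assumes the usual Kafka message-generator defaults (empty string for string fields, `Uuid.ZERO_UUID` for uuid fields).

```java
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.OffsetFetchRequestData;

public class TopicsFieldDefaultsSketch {
    public static void main(String[] args) {
        OffsetFetchRequestData.OffsetFetchRequestTopics topic =
            new OffsetFetchRequestData.OffsetFetchRequestTopics();

        // Freshly constructed, the topic carries the sentinel values that the
        // builder and the broker interpret as "not provided".
        System.out.println(topic.name().isEmpty());                 // expected: true
        System.out.println(topic.topicId().equals(Uuid.ZERO_UUID)); // expected: true
    }
}
```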
OffsetFetchResponse.json
@@ -38,7 +38,9 @@
// Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is
// the same as version 8 but can return STALE_MEMBER_EPOCH and UNKNOWN_MEMBER_ID errors when the new consumer group
// protocol is used.
"validVersions": "1-9",
//
// Version 10 adds support for topic ids and removes support for topic names (KIP-848).
"validVersions": "1-10",
"flexibleVersions": "6+",
// Supported errors:
// - GROUP_AUTHORIZATION_FAILED (version 0+)
@@ -49,6 +51,7 @@
// - UNSTABLE_OFFSET_COMMIT (version 7+)
// - UNKNOWN_MEMBER_ID (version 9+)
// - STALE_MEMBER_EPOCH (version 9+)
// - UNKNOWN_TOPIC_ID (version 10+)
"fields": [
{ "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true,
"about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." },
@@ -78,8 +81,10 @@
"about": "The group ID." },
{ "name": "Topics", "type": "[]OffsetFetchResponseTopics", "versions": "8+",
"about": "The responses per topic.", "fields": [
{ "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName",
{ "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true,
"about": "The topic name." },
{ "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true,
"about": "The topic ID." },
{ "name": "Partitions", "type": "[]OffsetFetchResponsePartitions", "versions": "8+",
"about": "The responses per partition.", "fields": [
{ "name": "PartitionIndex", "type": "int32", "versions": "8+",
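Aside (not part of the patch): a rough sketch of what consuming a version-10 response could look like on the caller's side. The accessors come from the generated `OffsetFetchResponseData` classes; the id-to-name map is a hypothetical stand-in for whatever topic metadata the caller keeps.

```java
import java.util.Map;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.OffsetFetchResponseData;
import org.apache.kafka.common.protocol.Errors;

public class OffsetFetchV10ResponseSketch {
    // topicNamesById is a hypothetical client-side id-to-name mapping.
    static void printOffsets(OffsetFetchResponseData.OffsetFetchResponseGroup group,
                             Map<Uuid, String> topicNamesById) {
        group.topics().forEach(topic -> {
            // In version 10 the Name field is no longer populated, so the id is all we get back.
            String name = topicNamesById.getOrDefault(topic.topicId(), "<unknown>");
            topic.partitions().forEach(partition -> {
                if (partition.errorCode() == Errors.UNKNOWN_TOPIC_ID.code()) {
                    // New in version 10: the broker could not resolve the topic id.
                    System.out.printf("%s-%d: unknown topic id%n", name, partition.partitionIndex());
                } else {
                    System.out.printf("%s-%d: committed offset %d%n",
                        name, partition.partitionIndex(), partition.committedOffset());
                }
            });
        });
    }
}
```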
core/src/main/scala/kafka/server/KafkaApis.scala (80 changes: 66 additions & 14 deletions)
@@ -1030,6 +1030,8 @@ class KafkaApis(val requestChannel: RequestChannel,
offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup,
requireStable: Boolean
): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = {
val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion)

groupCoordinator.fetchAllOffsets(
requestContext,
offsetFetchRequest,
@@ -1043,13 +1045,33 @@
offsetFetchResponse
} else {
// Clients are not allowed to see offsets for topics that are not authorized for Describe.
val (authorizedOffsets, _) = authHelper.partitionSeqByAuthorized(
val authorizedNames = authHelper.filterByAuthorized(
requestContext,
DESCRIBE,
TOPIC,
offsetFetchResponse.topics.asScala
)(_.name)
offsetFetchResponse.setTopics(authorizedOffsets.asJava)

val topics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics]
offsetFetchResponse.topics.forEach { topic =>
if (authorizedNames.contains(topic.name)) {
if (useTopicIds) {
// If the topic is not provided by the group coordinator, we set it
// using the metadata cache.
if (topic.topicId == Uuid.ZERO_UUID) {
topic.setTopicId(metadataCache.getTopicId(topic.name))
}
// If we don't have the topic id at all, we skip the topic because
// we can not serialize it without it.
if (topic.topicId != Uuid.ZERO_UUID) {
topics += topic
}
} else {
topics += topic
}
}
}
offsetFetchResponse.setTopics(topics.asJava)
}
}
}
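
Aside (not part of the patch): the skip behaviour above, restated as a standalone Java sketch. The plain map is a hypothetical stand-in for `metadataCache.getTopicId`; the point is that topics whose id cannot be resolved are dropped from a version-10 all-offsets response because they cannot be serialized without one.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.OffsetFetchResponseData;

public class TopicIdBackfillSketch {
    // topicIdsByName is a hypothetical stand-in for the broker metadata cache.
    static List<OffsetFetchResponseData.OffsetFetchResponseTopics> backfillTopicIds(
            List<OffsetFetchResponseData.OffsetFetchResponseTopics> fromCoordinator,
            Map<String, Uuid> topicIdsByName) {
        List<OffsetFetchResponseData.OffsetFetchResponseTopics> serializable = new ArrayList<>();
        for (OffsetFetchResponseData.OffsetFetchResponseTopics topic : fromCoordinator) {
            if (topic.topicId().equals(Uuid.ZERO_UUID)) {
                // The group coordinator answered with a name only; try to resolve the id.
                topic.setTopicId(topicIdsByName.getOrDefault(topic.name(), Uuid.ZERO_UUID));
            }
            if (!topic.topicId().equals(Uuid.ZERO_UUID)) {
                serializable.add(topic);
            }
            // Otherwise the topic is skipped: without an id it cannot be written at v10.
        }
        return serializable;
    }
}
```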
@@ -1059,14 +1081,53 @@
offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup,
requireStable: Boolean
): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = {
val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion)

if (useTopicIds) {
offsetFetchRequest.topics.forEach { topic =>
if (topic.topicId != Uuid.ZERO_UUID) {
metadataCache.getTopicName(topic.topicId).ifPresent(name => topic.setName(name))
}
}
}

// Clients are not allowed to see offsets for topics that are not authorized for Describe.
val (authorizedTopics, unauthorizedTopics) = authHelper.partitionSeqByAuthorized(
val authorizedTopicNames = authHelper.filterByAuthorized(
requestContext,
DESCRIBE,
TOPIC,
offsetFetchRequest.topics.asScala
)(_.name)

val authorizedTopics = new mutable.ArrayBuffer[OffsetFetchRequestData.OffsetFetchRequestTopics]
val errorTopics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics]

def buildErrorResponse(
topic: OffsetFetchRequestData.OffsetFetchRequestTopics,
error: Errors
): OffsetFetchResponseData.OffsetFetchResponseTopics = {
val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setTopicId(topic.topicId)
.setName(topic.name)
topic.partitionIndexes.forEach { partitionIndex =>
topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(partitionIndex)
.setCommittedOffset(-1)
.setErrorCode(error.code))
}
topicResponse
}

offsetFetchRequest.topics.forEach { topic =>
if (useTopicIds && topic.name.isEmpty) {
errorTopics += buildErrorResponse(topic, Errors.UNKNOWN_TOPIC_ID)
} else if (!authorizedTopicNames.contains(topic.name)) {
errorTopics += buildErrorResponse(topic, Errors.TOPIC_AUTHORIZATION_FAILED)
} else {
authorizedTopics += topic
}
}

groupCoordinator.fetchOffsets(
requestContext,
new OffsetFetchRequestData.OffsetFetchRequestGroup()
@@ -1084,19 +1145,10 @@
offsetFetchResponse
} else {
val topics = new util.ArrayList[OffsetFetchResponseData.OffsetFetchResponseTopics](
offsetFetchResponse.topics.size + unauthorizedTopics.size
offsetFetchResponse.topics.size + errorTopics.size
)
topics.addAll(offsetFetchResponse.topics)
unauthorizedTopics.foreach { topic =>
val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic.name)
topic.partitionIndexes.forEach { partitionIndex =>
topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(partitionIndex)
.setCommittedOffset(-1)
.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code))
}
topics.add(topicResponse)
}
topics.addAll(errorTopics.asJava)
offsetFetchResponse.setTopics(topics)
}
}
ConsumerProtocolMigrationTest.scala
@@ -19,7 +19,7 @@ package kafka.server
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.{TopicPartition, Uuid}
import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchResponseData, SyncGroupResponseData}
import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchRequestData, OffsetFetchResponseData, SyncGroupResponseData}
import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.test.ClusterInstance
@@ -883,7 +883,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord
.setGroupId(groupId)
.setTopics(List(
new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setName("foo")
.setName(if (version < 10) "foo" else "")
.setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID)
.setPartitions(List(
new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(0)
@@ -897,14 +898,16 @@
).asJava)
).asJava),
fetchOffsets(
groupId = groupId,
memberId = memberId1,
memberEpoch = 1,
partitions = List(
new TopicPartition("foo", 0),
new TopicPartition("foo", 1),
new TopicPartition("foo", 2)
),
group = new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId(groupId)
.setMemberId(memberId1)
.setMemberEpoch(1)
.setTopics(List(
new OffsetFetchRequestData.OffsetFetchRequestTopics()
.setName("foo")
.setTopicId(topicId)
.setPartitionIndexes(List[Integer](0, 1, 2).asJava)
).asJava),
requireStable = false,
version = version.toShort
)
@@ -1183,7 +1186,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord
.setGroupId(groupId)
.setTopics(List(
new OffsetFetchResponseData.OffsetFetchResponseTopics()
.setName("foo")
.setName(if (version < 10) "foo" else "")
.setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID)
.setPartitions(List(
new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(0)
@@ -1197,15 +1201,17 @@
).asJava)
).asJava),
fetchOffsets(
groupId = groupId,
memberId = memberId1,
memberEpoch = 1,
partitions = List(
new TopicPartition("foo", 0),
new TopicPartition("foo", 1),
new TopicPartition("foo", 2)
),
requireStable = false,
group = new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId(groupId)
.setMemberId(memberId1)
.setMemberEpoch(1)
.setTopics(List(
new OffsetFetchRequestData.OffsetFetchRequestTopics()
.setName("foo")
.setTopicId(topicId)
.setPartitionIndexes(List[Integer](0, 1, 2).asJava)
).asJava),
requireStable = true,
Member: is this change to true intentional?

Member Author: nope, let me revert it.

version = version.toShort
)
)