KAFKA-13850: Show missing record type in MetadataShell #12103
@@ -21,16 +21,22 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.metadata.AccessControlEntryRecord;
import org.apache.kafka.common.metadata.AccessControlEntryRecordJsonConverter;
import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord;
import org.apache.kafka.common.metadata.ClientQuotaRecord;
import org.apache.kafka.common.metadata.ClientQuotaRecord.EntityData;
import org.apache.kafka.common.metadata.ConfigRecord;
import org.apache.kafka.common.metadata.FeatureLevelRecord;
import org.apache.kafka.common.metadata.FeatureLevelRecordJsonConverter;
import org.apache.kafka.common.metadata.FenceBrokerRecord;
import org.apache.kafka.common.metadata.MetadataRecordType;
import org.apache.kafka.common.metadata.PartitionChangeRecord;
import org.apache.kafka.common.metadata.PartitionRecord;
import org.apache.kafka.common.metadata.PartitionRecordJsonConverter;
import org.apache.kafka.common.metadata.ProducerIdsRecord;
import org.apache.kafka.common.metadata.RegisterBrokerRecord;
import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord;
import org.apache.kafka.common.metadata.RemoveTopicRecord;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.common.metadata.UnfenceBrokerRecord;
@@ -39,6 +45,8 @@
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.metadata.BrokerRegistrationFencingChange;
import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange;
import org.apache.kafka.queue.EventQueue;
import org.apache.kafka.queue.KafkaEventQueue;
import org.apache.kafka.raft.Batch;
@@ -302,6 +310,22 @@ private void handleCommitImpl(MetadataRecordType type, ApiMessage message)
                    create("isFenced").setContents("false");
                break;
            }
            case BROKER_REGISTRATION_CHANGE_RECORD: {
                BrokerRegistrationChangeRecord record = (BrokerRegistrationChangeRecord) message;
                BrokerRegistrationFencingChange fencingChange =
                    BrokerRegistrationFencingChange.fromValue(record.fenced()).get();
                if (fencingChange != BrokerRegistrationFencingChange.NONE) {
                    data.root.mkdirs("brokers", Integer.toString(record.brokerId()))
                        .create("isFenced").setContents(Boolean.toString(fencingChange.asBoolean().get()));
                }
                BrokerRegistrationInControlledShutdownChange inControlledShutdownChange =
                    BrokerRegistrationInControlledShutdownChange.fromValue(record.inControlledShutdown()).get();
                if (inControlledShutdownChange != BrokerRegistrationInControlledShutdownChange.NONE) {
                    data.root.mkdirs("brokers", Integer.toString(record.brokerId()))
                        .create("inControlledShutdown").setContents(Boolean.toString(inControlledShutdownChange.asBoolean().get()));
                }
                break;
            }
            case REMOVE_TOPIC_RECORD: {
                RemoveTopicRecord record = (RemoveTopicRecord) message;
                DirectoryNode topicsDirectory =
@@ -333,6 +357,35 @@ private void handleCommitImpl(MetadataRecordType type, ApiMessage message)
                producerIds.create("nextBlockStartId").setContents(record.nextProducerId() + "");
                break;
            }
            case ACCESS_CONTROL_ENTRY_RECORD: {
                AccessControlEntryRecord record = (AccessControlEntryRecord) message;
                DirectoryNode acls = data.root.mkdirs("acl").mkdirs("id");
                FileNode file = acls.create(record.id().toString());
                file.setContents(AccessControlEntryRecordJsonConverter.write(record,
                    AccessControlEntryRecord.HIGHEST_SUPPORTED_VERSION).toPrettyString());
                break;
            }
            case REMOVE_ACCESS_CONTROL_ENTRY_RECORD: {
                RemoveAccessControlEntryRecord record = (RemoveAccessControlEntryRecord) message;
                DirectoryNode acls = data.root.mkdirs("acl").mkdirs("id");
                acls.rmrf(record.id().toString());
                break;
            }
            case FEATURE_LEVEL_RECORD: {
                FeatureLevelRecord record = (FeatureLevelRecord) message;
                DirectoryNode features = data.root.mkdirs("features");
                if (record.featureLevel() == 0) {
                    features.rmrf(record.name());
FeatureLevelRecord.json (referenced in the review discussion below):

  "apiKey": 12,
  "type": "metadata",
  "name": "FeatureLevelRecord",
  "validVersions": "0",
  "flexibleVersions": "0+",
  "fields": [
    { "name": "Name", "type": "string", "versions": "0+",
      "about": "The feature name." },
    { "name": "FeatureLevel", "type": "int16", "versions": "0+",
      "about": "The current finalized feature level of this feature for the cluster." }
Firstly, we did have a RemoveFeatureLevelRecord.json, but it was removed in #12207; a version range of 0-0 is now used to represent "not supported":

kafka/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java, line 290 in 4b310d1:

    if (record.featureLevel() == 0) {
Thanks for sharing! What a magic 0, haha! Could we add a short note to FeatureLevelRecord.json? Something like:

    { "name": "FeatureLevel", "type": "int16", "versions": "0+",
      "about": "The current finalized feature level of this feature for the cluster; 0 means the feature is not enabled." }
Good suggestion.
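To make the "magic 0" concrete, here is a minimal sketch of the convention (my own illustration; FinalizedFeatureState is a hypothetical class, and the real FeatureControlManager keeps this state in timeline collections rather than a plain map): replaying a FeatureLevelRecord whose featureLevel is 0 removes the finalized feature, instead of relying on a separate RemoveFeatureLevelRecord.

import java.util.HashMap;
import java.util.Map;

// Illustrative only -- not the actual FeatureControlManager implementation.
class FinalizedFeatureState {
    private final Map<String, Short> finalizedVersions = new HashMap<>();

    void replay(String featureName, short featureLevel) {
        if (featureLevel == 0) {
            // Level 0 means "not enabled": drop the finalized feature entirely.
            finalizedVersions.remove(featureName);
        } else {
            finalizedVersions.put(featureName, featureLevel);
        }
    }

    Short finalizedLevel(String featureName) {
        return finalizedVersions.get(featureName);
    }
}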
@@ -18,19 +18,31 @@
package org.apache.kafka.shell;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.metadata.AccessControlEntryRecord;
import org.apache.kafka.common.metadata.AccessControlEntryRecordJsonConverter;
import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord;
import org.apache.kafka.common.metadata.ClientQuotaRecord;
import org.apache.kafka.common.metadata.ConfigRecord;
import org.apache.kafka.common.metadata.FeatureLevelRecord;
import org.apache.kafka.common.metadata.FeatureLevelRecordJsonConverter;
import org.apache.kafka.common.metadata.FenceBrokerRecord;
import org.apache.kafka.common.metadata.PartitionChangeRecord;
import org.apache.kafka.common.metadata.PartitionRecord;
import org.apache.kafka.common.metadata.PartitionRecordJsonConverter;
import org.apache.kafka.common.metadata.ProducerIdsRecord;
import org.apache.kafka.common.metadata.RegisterBrokerRecord;
import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord;
import org.apache.kafka.common.metadata.RemoveTopicRecord;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.common.metadata.UnfenceBrokerRecord;
import org.apache.kafka.common.metadata.UnregisterBrokerRecord;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.metadata.BrokerRegistrationFencingChange;
import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange;
import org.apache.kafka.metadata.LeaderRecoveryState;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -256,6 +268,61 @@ public void testUnfenceBrokerRecordAndFenceBrokerRecord() {
            metadataNodeManager.getData().root().directory("brokers", "1").file("isFenced").contents());
    }

    @Test
    public void testBrokerRegistrationChangeRecord() {
        RegisterBrokerRecord record = new RegisterBrokerRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2);
        metadataNodeManager.handleMessage(record);
        assertEquals("true",
            metadataNodeManager.getData().root().directory("brokers", "1").file("isFenced").contents());

        // Unfence broker
        BrokerRegistrationChangeRecord record1 = new BrokerRegistrationChangeRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2)
            .setFenced(BrokerRegistrationFencingChange.UNFENCE.value());
        metadataNodeManager.handleMessage(record1);
        assertEquals("false",
            metadataNodeManager.getData().root().directory("brokers", "1").file("isFenced").contents());

        // Fence broker
        BrokerRegistrationChangeRecord record2 = new BrokerRegistrationChangeRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2)
            .setFenced(BrokerRegistrationFencingChange.FENCE.value());
        metadataNodeManager.handleMessage(record2);
        assertEquals("true",
            metadataNodeManager.getData().root().directory("brokers", "1").file("isFenced").contents());

        // Unchanged
        BrokerRegistrationChangeRecord record3 = new BrokerRegistrationChangeRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2)
            .setFenced(BrokerRegistrationFencingChange.NONE.value());
        metadataNodeManager.handleMessage(record3);
        assertEquals("true",
            metadataNodeManager.getData().root().directory("brokers", "1").file("isFenced").contents());

        // Controlled shutdown
        BrokerRegistrationChangeRecord record4 = new BrokerRegistrationChangeRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2)
            .setInControlledShutdown(BrokerRegistrationInControlledShutdownChange.IN_CONTROLLED_SHUTDOWN.value());
        metadataNodeManager.handleMessage(record4);
        assertEquals("true",
            metadataNodeManager.getData().root().directory("brokers", "1").file("inControlledShutdown").contents());

        // Unchanged
        BrokerRegistrationChangeRecord record5 = new BrokerRegistrationChangeRecord()
            .setBrokerId(1)
            .setBrokerEpoch(2)
            .setInControlledShutdown(BrokerRegistrationInControlledShutdownChange.NONE.value());
        metadataNodeManager.handleMessage(record5);
        assertEquals("true",
            metadataNodeManager.getData().root().directory("brokers", "1").file("inControlledShutdown").contents());
    }

    @Test
    public void testClientQuotaRecord() {
        ClientQuotaRecord record = new ClientQuotaRecord()
@@ -336,4 +403,43 @@ public void testProducerIdsRecord() {
            11000 + "",
            metadataNodeManager.getData().root().directory("producerIds").file("nextBlockStartId").contents());
    }

    @Test
    public void testAccessControlEntryRecordAndRemoveAccessControlEntryRecord() {
        AccessControlEntryRecord record1 = new AccessControlEntryRecord()
            .setId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"))
            .setHost("example.com")
            .setResourceType(ResourceType.GROUP.code())
            .setResourceName("group")
            .setOperation(AclOperation.READ.code())
            .setPermissionType(AclPermissionType.ALLOW.code())
            .setPrincipal("User:kafka")
            .setPatternType(PatternType.LITERAL.code());
        metadataNodeManager.handleMessage(record1);
        assertEquals(
            AccessControlEntryRecordJsonConverter.write(record1, AccessControlEntryRecord.HIGHEST_SUPPORTED_VERSION).toPrettyString(),
            metadataNodeManager.getData().root().directory("acl").directory("id").file("GcaQDl2UTsCNs1p9s37XkQ").contents());

        RemoveAccessControlEntryRecord record2 = new RemoveAccessControlEntryRecord()
            .setId(Uuid.fromString("GcaQDl2UTsCNs1p9s37XkQ"));
        metadataNodeManager.handleMessage(record2);
        assertFalse(metadataNodeManager.getData().root().directory("acl").directory("id").children().containsKey("GcaQDl2UTsCNs1p9s37XkQ"));
    }

    @Test
    public void testFeatureLevelRecord() {
        FeatureLevelRecord record1 = new FeatureLevelRecord()
            .setName("metadata.version")
            .setFeatureLevel((short) 3);
        metadataNodeManager.handleMessage(record1);
        assertEquals(
            FeatureLevelRecordJsonConverter.write(record1, FeatureLevelRecord.HIGHEST_SUPPORTED_VERSION).toPrettyString(),
            metadataNodeManager.getData().root().directory("features").file("metadata.version").contents());

        FeatureLevelRecord record2 = new FeatureLevelRecord()
            .setName("metadata.version")
            .setFeatureLevel((short) 0);
        metadataNodeManager.handleMessage(record2);
        assertFalse(metadataNodeManager.getData().root().directory("features").children().containsKey("metadata.version"));
    }
}
Just as KIP-778 describes, each ACL is shown under /acl/id/ in its JSON form. However, I should mention that the ACL schema in ZooKeeper znodes is more hierarchical:
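For comparison, a rough sketch of the two layouts (the ZooKeeper side is from memory and only illustrative; the exact znode paths and JSON fields may differ):

KRaft metadata shell (this PR), flat and keyed by the ACL's Uuid:
    /acl/id/GcaQDl2UTsCNs1p9s37XkQ  ->  the full AccessControlEntryRecord as pretty-printed JSON

ZooKeeper-based AclAuthorizer, hierarchical and keyed by resource:
    /kafka-acl/<resourceType>/<resourceName>  ->  a JSON document listing every ACL entry for that resource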