[FEATURE] Add commit offset test for Kafka clients of different versions (#686)

Fixes #681 #687

### Motivation

#605 added a test framework for Kafka clients of different versions. However, it only added the basic e2e test; an important API, commitOffset, was not verified.

### Modifications

Add a commit offset test to BasicEndToEndPulsarTest and BasicEndToEndKafkaTest in the io.streamnative.pulsar.handlers.kop.compatibility package. This feature is part of the compatibility work between different Kafka client versions and KoP.
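As a rough illustration of the behaviour being verified (not the repository's actual test code, which lives in the compatibility package and uses its own version-agnostic client wrappers), a minimal sketch of a commit-offset scenario against the plain kafka-clients API might look like this:

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Illustrative scenario: produce a few records, consume and commit offsets explicitly,
// then re-open a consumer in the same group and verify nothing is re-delivered.
public class CommitOffsetScenario {

    public static void run(String bootstrapServers, String topic) throws Exception {
        Properties producerProps = new Properties();
        producerProps.put("bootstrap.servers", bootstrapServers);
        producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>(topic, "msg-" + i)).get();
            }
        }

        Properties consumerProps = new Properties();
        consumerProps.put("bootstrap.servers", bootstrapServers);
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "commit-offset-test");
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // First consumer: read everything, then commit the offsets synchronously.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps)) {
            consumer.subscribe(Collections.singleton(topic));
            int received = 0;
            while (received < 10) {
                received += consumer.poll(Duration.ofSeconds(1)).count();
            }
            consumer.commitSync();
        }

        // Second consumer in the same group: nothing should be re-delivered.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps)) {
            consumer.subscribe(Collections.singleton(topic));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(3));
            if (!records.isEmpty()) {
                throw new IllegalStateException("offsets were not committed correctly");
            }
        }
    }
}
```

The repository's tests run the equivalent scenario across different kafka-clients versions in both BasicEndToEndPulsarTest and BasicEndToEndKafkaTest.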
1 parent 40e6182 · commit 13004f7 · Showing 13 changed files with 487 additions and 24 deletions.
kafka-client-api/src/main/java/io/streamnative/kafka/client/api/TopicOffsetAndMetadata.java (57 additions, 0 deletions)
@@ -0,0 +1,57 @@

```java
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.streamnative.kafka.client.api;

import java.lang.reflect.InvocationTargetException;
import lombok.AllArgsConstructor;
import lombok.Getter;

/**
 * A compatibility wrapper for org.apache.kafka.clients.consumer.OffsetAndMetadata.
 */
@AllArgsConstructor
@Getter
public class TopicOffsetAndMetadata {

    private String topic;
    private int partition;
    private long offset;

    public <T> T createTopicPartition(final Class<T> clazz) {
        try {
            return clazz.getConstructor(
                    String.class, int.class
            ).newInstance(topic, partition);
        } catch (InvocationTargetException
                | InstantiationException
                | IllegalAccessException
                | NoSuchMethodException e) {
            throw new IllegalArgumentException(e);
        }
    }

    public <T> T createOffsetAndMetadata(final Class<T> clazz) {
        try {
            return clazz.getConstructor(
                    long.class
            ).newInstance(offset);
        } catch (InstantiationException
                | IllegalAccessException
                | InvocationTargetException
                | NoSuchMethodException e) {
            throw new IllegalArgumentException(e);
        }
    }
}
```
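This class lets version-agnostic test code describe an offset to commit while the concrete TopicPartition and OffsetAndMetadata classes of whichever kafka-clients version is on the classpath are instantiated reflectively. A hypothetical caller (the helper class and method below are illustrative, not part of this commit) might use it like this:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.streamnative.kafka.client.api.TopicOffsetAndMetadata;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Hypothetical helper: converts the version-agnostic TopicOffsetAndMetadata list
// into the concrete types of the kafka-clients version on the classpath,
// then commits them synchronously.
public class OffsetCommitExample {

    public static void commitOffsets(KafkaConsumer<String, String> consumer,
                                     List<TopicOffsetAndMetadata> offsets) {
        final Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
        for (TopicOffsetAndMetadata offset : offsets) {
            offsetMap.put(
                    offset.createTopicPartition(TopicPartition.class),
                    offset.createOffsetAndMetadata(OffsetAndMetadata.class));
        }
        consumer.commitSync(offsetMap);
    }
}
```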
kafka-impl/src/main/java/io/streamnative/pulsar/handlers/kop/utils/KopRecordsUtil.java (140 additions, 0 deletions)
@@ -0,0 +1,140 @@

```java
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.streamnative.pulsar.handlers.kop.utils;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.record.AbstractRecords;
import org.apache.kafka.common.record.ConvertedRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

/**
 * Utils for DownConverted and ReAssignOffset operations.
 */
@Slf4j
public class KopRecordsUtil {

    public static ConvertedRecords<MemoryRecords> convertAndAssignOffsets(Iterable<? extends RecordBatch> batches,
                                                                          byte toMagic,
                                                                          long firstOffset) throws IOException {
        // maintain the batch along with the decompressed records to avoid the need to decompress again
        List<RecordBatchAndRecords> recordBatchAndRecordsList = new ArrayList<>();
        int totalSizeEstimate = 0;

        long batchStartOffset = firstOffset;
        for (RecordBatch batch : batches) {
            byte toBatchMagic = toMagic;
            if (toMagic < RecordBatch.MAGIC_VALUE_V2) {
                if (batch.isControlBatch()) {
                    continue;
                }

                if (batch.compressionType().name.equals("zstd")) {
                    throw new IOException("Down-conversion of zstandard-compressed batches "
                            + "is not supported");
                }
            }

            List<Record> records = new ArrayList<>();
            long batchIndex = 0;
            for (Record record : batch) {
                records.add(record);
                batchIndex++;
            }

            if (records.isEmpty()) {
                continue;
            }

            // Batches whose magic is already <= toMagic are rebuilt as well: the internal offsets
            // of magic v0/v1 message sets would otherwise be destroyed, so every batch is rewritten
            // with reassigned offsets. The only thing preserved is the batch's original (lower)
            // magic value, so that the magic never changes upward.
            if (batch.magic() < toMagic) {
                toBatchMagic = batch.magic();
            }

            totalSizeEstimate += AbstractRecords.estimateSizeInBytes(
                    toBatchMagic, batchStartOffset, batch.compressionType(), records);
            recordBatchAndRecordsList.add(new RecordBatchAndRecords(batch, records, batchStartOffset, toBatchMagic));
            batchStartOffset += batchIndex;
        }

        ByteBuffer buffer = ByteBuffer.allocate(totalSizeEstimate);
        for (RecordBatchAndRecords recordBatchAndRecords : recordBatchAndRecordsList) {
            MemoryRecordsBuilder builder = convertRecordBatch(buffer, recordBatchAndRecords);
            buffer = builder.buffer();
        }

        buffer.flip();
        recordBatchAndRecordsList.clear();
        return new ConvertedRecords<>(MemoryRecords.readableRecords(buffer), null);
    }

    private static MemoryRecordsBuilder convertRecordBatch(ByteBuffer buffer,
                                                           RecordBatchAndRecords recordBatchAndRecords) {
        RecordBatch batch = recordBatchAndRecords.batch;
        byte toBatchMagic = recordBatchAndRecords.toBatchMagic;
        final TimestampType timestampType = batch.timestampType();
        long logAppendTime = timestampType
                == TimestampType.LOG_APPEND_TIME ? batch.maxTimestamp() : RecordBatch.NO_TIMESTAMP;

        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, toBatchMagic, batch.compressionType(),
                timestampType, recordBatchAndRecords.baseOffset, logAppendTime);

        long startOffset = recordBatchAndRecords.baseOffset;
        for (Record record : recordBatchAndRecords.records) {
            if (toBatchMagic > RecordBatch.MAGIC_VALUE_V1) {
                builder.appendWithOffset(startOffset++,
                        record.timestamp(),
                        record.key(),
                        record.value(),
                        record.headers());
            } else {
                builder.appendWithOffset(startOffset++,
                        record.timestamp(),
                        record.key(),
                        record.value());
            }
        }

        builder.close();
        return builder;
    }

    private static class RecordBatchAndRecords {
        private final RecordBatch batch;
        private final List<Record> records;
        private final Long baseOffset;
        private final byte toBatchMagic;

        private RecordBatchAndRecords(RecordBatch batch,
                                      List<Record> records,
                                      Long baseOffset,
                                      byte toBatchMagic) {
            this.batch = batch;
            this.records = records;
            this.baseOffset = baseOffset;
            this.toBatchMagic = toBatchMagic;
        }
    }
}
```
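A minimal sketch of how this utility can be exercised, assuming a recent kafka-clients API on the classpath (the example class is illustrative and not part of this commit): build a magic v2 batch, then down-convert it to magic v1 while re-basing its offsets.

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import io.streamnative.pulsar.handlers.kop.utils.KopRecordsUtil;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.ConvertedRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class KopRecordsUtilExample {

    public static void main(String[] args) throws IOException {
        // Build a small magic v2 batch whose base offset starts at 0.
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                ByteBuffer.allocate(1024),
                RecordBatch.MAGIC_VALUE_V2,
                CompressionType.NONE,
                TimestampType.CREATE_TIME,
                0L);
        for (int i = 0; i < 3; i++) {
            builder.append(new SimpleRecord(System.currentTimeMillis(),
                    ("key-" + i).getBytes(StandardCharsets.UTF_8),
                    ("value-" + i).getBytes(StandardCharsets.UTF_8)));
        }
        MemoryRecords v2Records = builder.build();

        // Down-convert to magic v1 and re-base the offsets at 100.
        ConvertedRecords<MemoryRecords> converted = KopRecordsUtil.convertAndAssignOffsets(
                v2Records.batches(), RecordBatch.MAGIC_VALUE_V1, 100L);
        for (Record record : converted.records().records()) {
            System.out.println(record.offset()); // prints 100, 101, 102
        }
    }
}
```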