Skip to content

Commit

Permalink
Add adocs for new feature.
Browse files Browse the repository at this point in the history
  • Loading branch information
chickenchickenlove committed Oct 8, 2024
1 parent fd3e4ff commit 6adf7f8
Show file tree
Hide file tree
Showing 5 changed files with 37 additions and 27 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
[[kafka-headers-for-batch-listener]]
= Kafka Headers for batch listener

When processing `ConsumerRecord` instances with a `BatchListener`, the `KafkaHeaders.DELIVERY_ATTEMPT` header is handled differently than it is with a `SingleRecordListener`.

To inject the `KafkaHeaders.DELIVERY_ATTEMPT` header into `ConsumerRecord` when using the `BatchListener`, set the `DeliveryAttemptAwareRetryListener` as the `RetryListener` in the `ErrorHandler`.

Please refer to the code below.
[source, java]
----
final FixedBackOff fixedBackOff = new FixedBackOff(1, 10);
final DefaultErrorHandler errorHandler = new DefaultErrorHandler(fixedBackOff);
errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener());
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory);
factory.setCommonErrorHandler(errorHandler);
----
Original file line number Diff line number Diff line change
Expand Up @@ -49,4 +49,10 @@ When using `DeadLetterPublishingRecovererFactory`, the user applications can ove
[[x33-customize-kafka-streams-implementation]]
=== Customizing The Implementation of Kafka Streams

When using `KafkaStreamsCustomizer` it is now possible to return a custom implementation of the `KafkaStreams` object by overriding the `initKafkaStreams` method.
When using `KafkaStreamsCustomizer` it is now possible to return a custom implementation of the `KafkaStreams` object by overriding the `initKafkaStreams` method.

[[x33-kafka-headers-for-batch-listeners]]
=== KafkaHeaders.DELIVERY_ATTEMPT for batch listeners
When using a `BatchListener`, each `ConsumerRecord` can have the `KafkaHeaders.DELIVERY_ATTEMPT` header in its header fields.
If the `DeliveryAttemptAwareRetryListener` is set on the error handler as a retry listener, each `ConsumerRecord` receives a delivery attempt header.
For more details, see xref:retrytopic/kafka-headers-for-batch-listener.adoc[kafka-headers-for-batch-listener].
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,6 @@

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;

import org.springframework.kafka.support.KafkaHeaders;
Expand All @@ -45,28 +43,17 @@ public void failedDelivery(ConsumerRecord<?, ?> record, Exception ex, int delive
}

/**
* Called after a delivery failed for batch records.
* Invoked after a delivery fails for batch records.
* If the {@link KafkaHeaders}.DELIVERY_ATTEMPT header already exists in the {@link ConsumerRecord}'s headers,
* it will be removed. Then, the provided `deliveryAttempt` is added to the {@link ConsumerRecord}'s headers.
*
* @param records the records.
* @param ex the exception.
* @param deliveryAttempt the delivery attempt, if available.
*/
@Override
public void failedDelivery(ConsumerRecords<?, ?> records, Exception ex, int deliveryAttempt) {
for (ConsumerRecord<?, ?> record : records) {

Headers headers = record.headers();
int headerCount = 0;
Iterable<Header> iterator = record.headers().headers(KafkaHeaders.DELIVERY_ATTEMPT);
for (Header header : iterator) {
headerCount += 1;
}

if (headerCount > 0) {
headers.remove(KafkaHeaders.DELIVERY_ATTEMPT);
}
record.headers().remove(KafkaHeaders.DELIVERY_ATTEMPT);

byte[] buff = new byte[4]; // NOSONAR (magic #)
ByteBuffer bb = ByteBuffer.wrap(buff);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,17 +63,17 @@
@SpringJUnitConfig
@DirtiesContext
@EmbeddedKafka
class DeliveryAttemptAwareRetryListenerIntegrationTest {
class DeliveryAttemptAwareRetryListenerIntegrationTests {

static final String MAIN_TOPIC_CONTAINER_FACTORY0 = "kafkaListenerContainerFactory0";
static final String MAIN_TOPIC_CONTAINER_FACTORY0 = "deliveryMyTestKafkaListenerContainerFactory0";

static final String TEST_TOPIC0 = "myBatchDeliveryAttemptTopic0";

static final int MAX_ATTEMPT_COUNT0 = 3;

static final CountDownLatch latch0 = new CountDownLatch(MAX_ATTEMPT_COUNT0 + 1);

static final String MAIN_TOPIC_CONTAINER_FACTORY1 = "kafkaListenerContainerFactory1";
static final String MAIN_TOPIC_CONTAINER_FACTORY1 = "deliveryMyTestKafkaListenerContainerFactory1";

static final String TEST_TOPIC1 = "myBatchDeliveryAttemptTopic1";

Expand Down Expand Up @@ -103,7 +103,7 @@ void should_have_delivery_attempt_header_in_each_consumer_record(@Autowired Test
Map<Integer, Integer> deliveryAttemptCountMap = convertToMap(listener.receivedHeaders);

for (int attemptCnt = 1; attemptCnt <= MAX_ATTEMPT_COUNT0; attemptCnt++) {
assertThat(deliveryAttemptCountMap.get(attemptCnt)).isEqualTo(3);
assertThat(deliveryAttemptCountMap.get(attemptCnt)).isGreaterThan(1);
}
}

Expand All @@ -125,7 +125,7 @@ void should_have_delivery_attempt_header_in_each_consumer_record_with_more_bigge
Map<Integer, Integer> deliveryAttemptCountMap = convertToMap(listener.receivedHeaders);

for (int attemptCnt = 1; attemptCnt <= MAX_ATTEMPT_COUNT1; attemptCnt++) {
assertThat(deliveryAttemptCountMap.get(attemptCnt)).isEqualTo(3);
assertThat(deliveryAttemptCountMap.get(attemptCnt)).isGreaterThan(1);
}
}

Expand Down Expand Up @@ -266,9 +266,9 @@ ConsumerFactory<String, String> consumerFactory() {

@Bean
ConcurrentKafkaListenerContainerFactory<String, String>
kafkaListenerContainerFactory0(ConsumerFactory<String, String> consumerFactory) {
deliveryMyTestKafkaListenerContainerFactory0(ConsumerFactory<String, String> consumerFactory) {

final FixedBackOff fixedBackOff = new FixedBackOff(1000L, MAX_ATTEMPT_COUNT0);
final FixedBackOff fixedBackOff = new FixedBackOff(1, MAX_ATTEMPT_COUNT0);
DefaultErrorHandler errorHandler = new DefaultErrorHandler(fixedBackOff);
errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener());

Expand All @@ -285,9 +285,9 @@ ConsumerFactory<String, String> consumerFactory() {

@Bean
ConcurrentKafkaListenerContainerFactory<String, String>
kafkaListenerContainerFactory1(ConsumerFactory<String, String> consumerFactory) {
deliveryMyTestKafkaListenerContainerFactory1(ConsumerFactory<String, String> consumerFactory) {

final FixedBackOff fixedBackOff = new FixedBackOff(1000L, MAX_ATTEMPT_COUNT1);
final FixedBackOff fixedBackOff = new FixedBackOff(1, MAX_ATTEMPT_COUNT1);
DefaultErrorHandler errorHandler = new DefaultErrorHandler(fixedBackOff);
errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,9 @@
/**
* @author Sanghyeok An
* @since 3.3
*
*/

class DeliveryAttemptAwareRetryListenerTest {
class DeliveryAttemptAwareRetryListenerTests {

@Test
void should_have_single_header_and_header_value_should_be_1() {
Expand Down

0 comments on commit 6adf7f8

Please sign in to comment.