Add latency benchmark #16

Merged 1 commit on Sep 12, 2021
30 changes: 29 additions & 1 deletion README.md
@@ -41,4 +41,32 @@ jmx address: 192.168.50.224:15905
=================================================
```

Note that the command to set up a broker can be executed multiple times to create a broker cluster.

# Kafka tools

This project offers many Kafka tools to simplify life for Kafka users.

## Latency Benchmark

This tool measures the following latencies:
1. producer latency: the time to complete a producer send request
2. E2E latency: the time for a record to travel through Kafka

Run the benchmark from source code
```shell
./gradlew run --args="Latency --bootstrap.servers 192.168.50.224:18878"
```

Run the benchmark from release
```shell
./bin/App Latency --bootstrap.servers 192.168.50.224:18878
```

### Latency Benchmark Configurations
1. --bootstrap.servers: the broker addresses
2. --consumers: the number of consumers (threads). Default: 1
3. --producers: the number of producers (threads). Default: 1
4. --valueSize: the size of the record value. Default: 100 bytes
5. --duration: the duration to run this benchmark. Default: 5 seconds
6. --flushDuration: the duration between producer flushes. Default: 2 seconds
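
For example, a run that overrides several of the defaults might look like this (the broker address and values are illustrative):
```shell
./bin/App Latency --bootstrap.servers 192.168.50.224:18878 \
  --producers 2 --consumers 2 --valueSize 1024 --duration 30 --flushDuration 2
```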
1 change: 1 addition & 0 deletions app/build.gradle
@@ -27,6 +27,7 @@ dependencies {
application {
    // Define the main class for the application.
    mainClass = 'org.astraea.App'
    applicationDefaultJvmArgs = ["-server", "-XX:+UseG1GC", "-Djava.awt.headless=true", "-Xms4G", "-Xmx4G"]
}

java {
6 changes: 2 additions & 4 deletions app/src/main/java/org/astraea/App.java
@@ -4,12 +4,10 @@
 import java.util.List;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
+import org.astraea.performance.latency.End2EndLatency;

 public class App {
-  private static final List<Class<?>> MAIN_CLASSES =
-      Arrays.asList(
-          // add the classes having main function
-          );
+  private static final List<Class<?>> MAIN_CLASSES = Arrays.asList(End2EndLatency.class);

   private static String toString(List<Class<?>> mains) {
     return mains.stream().map(Class::getName).collect(Collectors.joining(","));
41 changes: 41 additions & 0 deletions app/src/main/java/org/astraea/performance/latency/CloseableThread.java
@@ -0,0 +1,41 @@
package org.astraea.performance.latency;

import java.io.Closeable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

abstract class CloseableThread implements Runnable, Closeable {
  private final AtomicBoolean closed = new AtomicBoolean();
  private final CountDownLatch closeLatch = new CountDownLatch(1);

  @Override
  public final void run() {
    try {
      while (!closed.get()) execute();
    } catch (InterruptedException e) {
      // swallow
    } finally {
      try {
        cleanup();
      } finally {
        closeLatch.countDown();
      }
    }
  }

  /** looped action. */
  abstract void execute() throws InterruptedException;

  /** final action when leaving loop. */
  void cleanup() {}

  @Override
  public void close() {
    closed.set(true);
    try {
      closeLatch.await();
    } catch (InterruptedException e) {
      // swallow
    }
  }
}
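
As a usage sketch (not part of this PR): a subclass only needs to supply execute(), and close() blocks until cleanup() has finished. The class is package-private, so the example assumes the same package:

```java
package org.astraea.performance.latency;

// Hypothetical example: prints a heartbeat once per second until closed.
class HeartbeatThread extends CloseableThread {
  @Override
  void execute() throws InterruptedException {
    System.out.println("alive");
    Thread.sleep(1000);
  }

  @Override
  void cleanup() {
    System.out.println("bye");
  }
}

// usage:
// var heartbeat = new HeartbeatThread();
// new Thread(heartbeat).start();
// ...
// heartbeat.close(); // sets the flag, then waits for cleanup() to finish
```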
58 changes: 58 additions & 0 deletions app/src/main/java/org/astraea/performance/latency/ComponentFactory.java
@@ -0,0 +1,58 @@
package org.astraea.performance.latency;

import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

interface ComponentFactory {
Contributor Author commented:

@chinghongfang Please take a look. This demonstrates one way to abstract the implementation so that we can write unit tests to verify our own logic without actually using Kafka.


  /**
   * create a factory based on a kafka cluster. All components created by this factory will send
   * requests to kafka to get responses.
   *
   * @param brokers kafka broker addresses
   * @param topics to subscribe
   * @return a factory based on kafka
   */
  static ComponentFactory fromKafka(String brokers, Set<String> topics) {

    return new ComponentFactory() {
      private final String groupId = "group-id-" + System.currentTimeMillis();

      @Override
      public Producer producer() {
        var props = new Properties();
        props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        return Producer.fromKafka(props);
      }

      @Override
      public Consumer createConsumer() {
        var props = new Properties();
        props.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokers);
        // all consumers are in same group, so there is no duplicate data in read workload.
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return Consumer.fromKafka(props, topics);
      }

      @Override
      public TopicAdmin createTopicAdmin() {
        var props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        return TopicAdmin.fromKafka(props);
      }
    };
  }

  /** @return a new producer. Please close it when you don't need it. */
  Producer producer();

  /** @return a new consumer. Please close it when you don't need it. */
  Consumer createConsumer();

  /** @return a new topic admin. Please close it when you don't need it. */
  TopicAdmin createTopicAdmin();
}
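
A sketch of how callers might wire the Kafka-backed factory (the broker address and topic name are illustrative; Consumer extends Closeable, so try-with-resources works):

```java
// sketch, assuming the same package as ComponentFactory
var factory = ComponentFactory.fromKafka("192.168.50.224:18878", Set.of("test-topic"));
try (var consumer = factory.createConsumer()) {
  var records = consumer.poll();
  records.forEach(record -> System.out.println(record.topic()));
}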
45 changes: 45 additions & 0 deletions app/src/main/java/org/astraea/performance/latency/Consumer.java
@@ -0,0 +1,45 @@
package org.astraea.performance.latency;

import java.io.Closeable;
import java.time.Duration;
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

interface Consumer extends Closeable {
  Duration POLL_TIMEOUT = Duration.ofMillis(500);

  static Consumer fromKafka(Properties props, Set<String> topics) {
    var kafkaConsumer =
        new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    kafkaConsumer.subscribe(topics);
    return new Consumer() {

      @Override
      public ConsumerRecords<byte[], byte[]> poll() {
        return kafkaConsumer.poll(POLL_TIMEOUT);
      }

      @Override
      public void wakeup() {
        kafkaConsumer.wakeup();
      }

      @Override
      public void close() {
        kafkaConsumer.close();
      }
    };
  }

  /** see {@link KafkaConsumer#poll(Duration)} */
  ConsumerRecords<byte[], byte[]> poll();

  /** see {@link KafkaConsumer#wakeup()} */
  default void wakeup() {}

  @Override
  default void close() {}
}
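
Because this interface hides KafkaConsumer behind two methods, a test double needs no Kafka cluster, which is what the review comment above is aiming at. A minimal sketch (FakeConsumer is hypothetical, not part of this PR, and assumes the ConsumerRecords map constructor available in Kafka clients of this era):

```java
package org.astraea.performance.latency;

import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

class FakeConsumer implements Consumer {
  private final BlockingQueue<ConsumerRecord<byte[], byte[]>> queue = new LinkedBlockingQueue<>();

  // test code pushes records here instead of producing them to a real cluster
  void push(ConsumerRecord<byte[], byte[]> record) {
    queue.add(record);
  }

  @Override
  public ConsumerRecords<byte[], byte[]> poll() {
    var record = queue.poll();
    if (record == null) return ConsumerRecords.empty();
    return new ConsumerRecords<>(
        Map.of(new TopicPartition(record.topic(), record.partition()), List.of(record)));
  }
}
```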
46 changes: 46 additions & 0 deletions app/src/main/java/org/astraea/performance/latency/ConsumerThread.java
@@ -0,0 +1,46 @@
package org.astraea.performance.latency;

import java.util.Objects;

class ConsumerThread extends CloseableThread {

  private final Consumer consumer;
  private final DataManager dataManager;
  private final MeterTracker tracker;

  ConsumerThread(DataManager dataManager, MeterTracker tracker, Consumer consumer) {
    this.dataManager = Objects.requireNonNull(dataManager);
    this.consumer = Objects.requireNonNull(consumer);
    this.tracker = Objects.requireNonNull(tracker);
  }

  @Override
  public void execute() throws InterruptedException {
    try {
      var now = System.currentTimeMillis();
      var records = consumer.poll();
      records.forEach(
          record -> {
            var entry = dataManager.removeSendingRecord(record.key());
            var latency = now - entry.getValue();
            var produceRecord = entry.getKey();
            if (!KafkaUtils.equal(produceRecord, record))
              System.out.println("receive corrupt data!!!");
            else tracker.record(record.serializedKeySize() + record.serializedValueSize(), latency);
          });
    } catch (org.apache.kafka.common.errors.WakeupException e) {
      throw new InterruptedException(e.getMessage());
    }
  }

  @Override
  void cleanup() {
    consumer.close();
  }

  @Override
  public void close() {
    consumer.wakeup();
    super.close();
  }
}
74 changes: 74 additions & 0 deletions app/src/main/java/org/astraea/performance/latency/DataManager.java
@@ -0,0 +1,74 @@
package org.astraea.performance.latency;

import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.ProducerRecord;

class DataManager {

  static DataManager of(String topic, int valueSize) {
    return new DataManager(topic, valueSize, true);
  }

  static DataManager noConsumer(String topic, int valueSize) {
    return new DataManager(topic, valueSize, false);
  }

  /** record key -> (record, timestamp_done) */
  private final ConcurrentMap<byte[], Map.Entry<ProducerRecord<byte[], byte[]>, Long>>
      sendingRecords = new ConcurrentSkipListMap<>(Arrays::compare);

  private final String topic;
  private final int valueSize;
  private final boolean hasConsumer;

  private final AtomicLong recordIndex = new AtomicLong(0);

  final AtomicLong producerRecords = new AtomicLong(0);

  private DataManager(String topic, int valueSize, boolean hasConsumer) {
    this.topic = Objects.requireNonNull(topic);
    this.valueSize = valueSize;
    this.hasConsumer = hasConsumer;
  }

  /**
   * @return a new record for the configured topic. The record consists of key, value, topic and
   *     header.
   */
  ProducerRecord<byte[], byte[]> producerRecord() {
    var content = String.valueOf(recordIndex.getAndIncrement());
    var rawContent = content.getBytes();
    var headers = Collections.singletonList(KafkaUtils.header(content, rawContent));
    return new ProducerRecord<>(topic, null, rawContent, new byte[valueSize], headers);
  }

  void sendingRecord(ProducerRecord<byte[], byte[]> record, long now) {
    if (hasConsumer) {
      var previous =
          sendingRecords.put(record.key(), new AbstractMap.SimpleImmutableEntry<>(record, now));
      if (previous != null) throw new RuntimeException("duplicate data!!!");
    }
    producerRecords.incrementAndGet();
  }

  /**
   * remove and return a sending record
   *
   * @param key key of the completed record
   * @return the completed record; throws NullPointerException if there is no related record.
   */
  Map.Entry<ProducerRecord<byte[], byte[]>, Long> removeSendingRecord(byte[] key) {
    if (!hasConsumer)
      throw new UnsupportedOperationException(
          "removeSendingRecord is unsupported when there is no consumer");
    return Objects.requireNonNull(sendingRecords.remove(key));
  }

  /** @return number of completed records */
  long numberOfProducerRecords() {
    return producerRecords.get();
  }
}
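
A sketch of the round trip DataManager mediates between the producer and consumer threads (the topic name and value size are illustrative):

```java
// sketch, assuming the same package as DataManager
var dataManager = DataManager.of("test-topic", 100);

// producer side: create a record and remember when it was sent
var record = dataManager.producerRecord();
dataManager.sendingRecord(record, System.currentTimeMillis());

// consumer side: look the record up by key and compute the E2E latency
var entry = dataManager.removeSendingRecord(record.key());
var latencyMs = System.currentTimeMillis() - entry.getValue();
```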