diff --git a/external/storm-kafka-client/pom.xml b/external/storm-kafka-client/pom.xml
index 0878fdf7baa..92b666d1c66 100644
--- a/external/storm-kafka-client/pom.xml
+++ b/external/storm-kafka-client/pom.xml
@@ -76,7 +76,13 @@
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest-core</artifactId>
+ <version>1.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-library</artifactId><version>1.3</version><scope>test</scope>
@@ -94,7 +100,6 @@
<groupId>org.slf4j</groupId>
<artifactId>log4j-over-slf4j</artifactId>
- <version>${log4j-over-slf4j.version}</version>
<scope>test</scope>
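
Note on the dependency change above: hamcrest-all bundles everything in one jar, while the split artifacts separate the core assertThat machinery (hamcrest-core) from the extended matchers (hamcrest-library). A minimal sketch of why both artifacts are needed, using hypothetical test code that is not part of this patch:

    import static org.hamcrest.CoreMatchers.is;          // shipped in hamcrest-core
    import static org.hamcrest.MatcherAssert.assertThat; // shipped in hamcrest-core
    import static org.hamcrest.Matchers.contains;        // shipped in hamcrest-library

    import java.util.Arrays;
    import java.util.List;

    public class HamcrestSplitExample {
        public static void main(String[] args) {
            List<Integer> offsets = Arrays.asList(1, 2, 3);
            assertThat(offsets.size(), is(3));      // basic matcher from hamcrest-core
            assertThat(offsets, contains(1, 2, 3)); // collection matcher from hamcrest-library
        }
    }
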
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
index 9ad2be294bf..bbad9e84f49 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
@@ -25,16 +25,13 @@
import java.util.Collection;
import java.util.Collections;
-import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.NavigableSet;
import java.util.Set;
-import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
@@ -48,6 +45,7 @@
import org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy;
import org.apache.storm.kafka.spout.internal.KafkaConsumerFactory;
import org.apache.storm.kafka.spout.internal.KafkaConsumerFactoryDefault;
+import org.apache.storm.kafka.spout.internal.OffsetManager;
import org.apache.storm.kafka.spout.internal.Timer;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
@@ -58,19 +56,19 @@
public class KafkaSpout<K, V> extends BaseRichSpout {
private static final long serialVersionUID = 4151921085047987154L;
+ //Initial delay for the commit and subscription refresh timers
+ public static final long TIMER_DELAY_MS = 500;
private static final Logger LOG = LoggerFactory.getLogger(KafkaSpout.class);
- private static final Comparator<KafkaSpoutMessageId> OFFSET_COMPARATOR = new OffsetComparator();
// Storm
protected SpoutOutputCollector collector;
// Kafka
private final KafkaSpoutConfig<K, V> kafkaSpoutConfig;
- private final KafkaConsumerFactory<K, V> kafkaConsumerFactory;
+ private KafkaConsumerFactory<K, V> kafkaConsumerFactory;
private transient KafkaConsumer<K, V> kafkaConsumer;
private transient boolean consumerAutoCommitMode;
-
// Bookkeeping
private transient FirstPollOffsetStrategy firstPollOffsetStrategy; // Strategy to determine the fetch offset of the first poll made by the spout upon activation
private transient KafkaSpoutRetryService retryService; // Class that has the logic to handle tuple failure
@@ -78,7 +76,7 @@ public class KafkaSpout<K, V> extends BaseRichSpout {
private transient boolean initialized; // Flag indicating that the spout is still undergoing initialization process.
// Initialization is only complete after the first call to KafkaSpoutConsumerRebalanceListener.onPartitionsAssigned()
- transient Map<TopicPartition, OffsetEntry> acked; // Tuples that were successfully acked. These tuples will be committed periodically when the commit timer expires, after consumer rebalance, or on close/deactivate. Not used if it's AutoCommitMode
+ private transient Map<TopicPartition, OffsetManager> acked; // Tuples that were successfully acked. These tuples will be committed periodically when the commit timer expires, after consumer rebalance, or on close/deactivate
private transient Set<KafkaSpoutMessageId> emitted; // Tuples that have been emitted but that are "on the wire", i.e. pending being acked or failed. Not used if it's AutoCommitMode
private transient Iterator<ConsumerRecord<K, V>> waitingToEmit; // Records that have been polled and are queued to be emitted in the nextTuple() call. One record is emitted per nextTuple()
private transient long numUncommittedOffsets; // Number of offsets that have been polled and emitted but not yet been committed. Not used if it's AutoCommitMode
@@ -87,13 +85,13 @@ public class KafkaSpout extends BaseRichSpout {
public KafkaSpout(KafkaSpoutConfig<K, V> kafkaSpoutConfig) {
- this(kafkaSpoutConfig, new KafkaConsumerFactoryDefault<K, V>());
+ this(kafkaSpoutConfig, new KafkaConsumerFactoryDefault<>());
}
//This constructor is here for testing
KafkaSpout(KafkaSpoutConfig<K, V> kafkaSpoutConfig, KafkaConsumerFactory<K, V> kafkaConsumerFactory) {
- this.kafkaSpoutConfig = kafkaSpoutConfig; // Pass in configuration
this.kafkaConsumerFactory = kafkaConsumerFactory;
+ this.kafkaSpoutConfig = kafkaSpoutConfig;
}
@Override
@@ -114,9 +112,9 @@ public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
retryService = kafkaSpoutConfig.getRetryService();
if (!consumerAutoCommitMode) { // If it is auto commit, no need to commit offsets manually
- commitTimer = new Timer(500, kafkaSpoutConfig.getOffsetsCommitPeriodMs(), TimeUnit.MILLISECONDS);
+ commitTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getOffsetsCommitPeriodMs(), TimeUnit.MILLISECONDS);
}
- refreshSubscriptionTimer = new Timer(500, kafkaSpoutConfig.getPartitionRefreshPeriodMs(), TimeUnit.MILLISECONDS);
+ refreshSubscriptionTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getPartitionRefreshPeriodMs(), TimeUnit.MILLISECONDS);
acked = new HashMap<>();
emitted = new HashSet<>();
@@ -204,7 +202,7 @@ private long doSeek(TopicPartition tp, OffsetAndMetadata committedOffset) {
private void setAcked(TopicPartition tp, long fetchOffset) {
// If this partition was previously assigned to this spout, leave the acked offsets as they were to resume where it left off
if (!consumerAutoCommitMode && !acked.containsKey(tp)) {
- acked.put(tp, new OffsetEntry(tp, fetchOffset));
+ acked.put(tp, new OffsetManager(tp, fetchOffset));
}
}
@@ -296,7 +294,7 @@ private void doSeekRetriableTopicPartitions() {
if (offsetAndMeta != null) {
kafkaConsumer.seek(rtp, offsetAndMeta.offset() + 1); // seek to the next offset that is ready to commit in next commit cycle
} else {
- kafkaConsumer.seek(rtp, acked.get(rtp).committedOffset + 1); // Seek to last committed offset
+ kafkaConsumer.seek(rtp, acked.get(rtp).getCommittedOffset() + 1); // Seek to last committed offset
}
}
}
@@ -353,7 +351,7 @@ private boolean emitTupleIfNotEmitted(ConsumerRecord<K, V> record) {
private void commitOffsetsForAckedTuples() {
// Find offsets that are ready to be committed for every topic partition
final Map<TopicPartition, OffsetAndMetadata> nextCommitOffsets = new HashMap<>();
- for (Map.Entry<TopicPartition, OffsetEntry> tpOffset : acked.entrySet()) {
+ for (Map.Entry<TopicPartition, OffsetManager> tpOffset : acked.entrySet()) {
final OffsetAndMetadata nextCommitOffset = tpOffset.getValue().findNextCommitOffset();
if (nextCommitOffset != null) {
nextCommitOffsets.put(tpOffset.getKey(), nextCommitOffset);
@@ -366,9 +364,14 @@ private void commitOffsetsForAckedTuples() {
LOG.debug("Offsets successfully committed to Kafka [{}]", nextCommitOffsets);
// Instead of iterating again, it would be possible to commit and update the state for each TopicPartition
// in the prior loop, but the multiple network calls should be more expensive than iterating twice over a small loop
- for (Map.Entry<TopicPartition, OffsetEntry> tpOffset : acked.entrySet()) {
- final OffsetEntry offsetEntry = tpOffset.getValue();
- offsetEntry.commit(nextCommitOffsets.get(tpOffset.getKey()));
+ for (Map.Entry<TopicPartition, OffsetAndMetadata> tpOffset : nextCommitOffsets.entrySet()) {
+ //Update the OffsetManager for each committed partition, and update numUncommittedOffsets
+ final TopicPartition tp = tpOffset.getKey();
+ final OffsetManager offsetManager = acked.get(tp);
+ long numCommittedOffsets = offsetManager.commit(tpOffset.getValue());
+ numUncommittedOffsets -= numCommittedOffsets;
+ LOG.debug("[{}] uncommitted offsets across all topic partitions",
+ numUncommittedOffsets);
}
} else {
LOG.trace("No offsets to commit. {}", this);
@@ -489,127 +492,7 @@ public Map<String, Object> getComponentConfiguration () {
private String getTopicsString() {
return kafkaSpoutConfig.getSubscription().getTopicsString();
}
+}
- // ======= Offsets Commit Management ==========
-
- private static class OffsetComparator implements Comparator<KafkaSpoutMessageId> {
- public int compare(KafkaSpoutMessageId m1, KafkaSpoutMessageId m2) {
- return m1.offset() < m2.offset() ? -1 : m1.offset() == m2.offset() ? 0 : 1;
- }
- }
-
- /**
- * This class is not thread safe
- */
- class OffsetEntry {
- private final TopicPartition tp;
- private final long initialFetchOffset; /* First offset to be fetched. It is either set to the beginning, end, or to the first uncommitted offset.
- * Initial value depends on offset strategy. See KafkaSpoutConsumerRebalanceListener */
- private long committedOffset; // last offset committed to Kafka. Initially it is set to fetchOffset - 1
- private final NavigableSet<KafkaSpoutMessageId> ackedMsgs = new TreeSet<>(OFFSET_COMPARATOR); // acked messages sorted by ascending order of offset
-
- public OffsetEntry(TopicPartition tp, long initialFetchOffset) {
- this.tp = tp;
- this.initialFetchOffset = initialFetchOffset;
- this.committedOffset = initialFetchOffset - 1;
- LOG.debug("Instantiated {}", this);
- }
-
- public void add(KafkaSpoutMessageId msgId) { // O(Log N)
- ackedMsgs.add(msgId);
- }
-
- /**
- * An offset is only committed when all records with lower offset have
- * been acked. This guarantees that all offsets smaller than the
- * committedOffset have been delivered.
- * @return the next OffsetAndMetadata to commit, or null if no offset is ready to commit.
- */
- public OffsetAndMetadata findNextCommitOffset() {
- boolean found = false;
- long currOffset;
- long nextCommitOffset = committedOffset;
- KafkaSpoutMessageId nextCommitMsg = null; // this is a convenience variable to make it faster to create OffsetAndMetadata
-
- for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) { // complexity is that of a linear scan on a TreeMap
- if ((currOffset = currAckedMsg.offset()) == nextCommitOffset + 1) { // found the next offset to commit
- found = true;
- nextCommitMsg = currAckedMsg;
- nextCommitOffset = currOffset;
- } else if (currAckedMsg.offset() > nextCommitOffset + 1) { // offset found is not continuous to the offsets listed to go in the next commit, so stop search
- LOG.debug("topic-partition [{}] has non-continuous offset [{}]. It will be processed in a subsequent batch.", tp, currOffset);
- break;
- } else {
- //Received a redundant ack. Ignore and continue processing.
- LOG.warn("topic-partition [{}] has unexpected offset [{}]. Current committed Offset [{}]",
- tp, currOffset, committedOffset);
- }
- }
-
- OffsetAndMetadata nextCommitOffsetAndMetadata = null;
- if (found) {
- nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, nextCommitMsg.getMetadata(Thread.currentThread()));
- LOG.debug("topic-partition [{}] has offsets [{}-{}] ready to be committed",tp, committedOffset + 1, nextCommitOffsetAndMetadata.offset());
- } else {
- LOG.debug("topic-partition [{}] has NO offsets ready to be committed", tp);
- }
- LOG.trace("{}", this);
- return nextCommitOffsetAndMetadata;
- }
-
- /**
- * Marks an offset has committed. This method has side effects - it sets the internal state in such a way that future
- * calls to {@link #findNextCommitOffset()} will return offsets greater than the offset specified, if any.
- *
- * @param committedOffset offset to be marked as committed
- */
- public void commit(OffsetAndMetadata committedOffset) {
- long numCommittedOffsets = 0;
- if (committedOffset != null) {
- final long oldCommittedOffset = this.committedOffset;
- numCommittedOffsets = committedOffset.offset() - this.committedOffset;
- this.committedOffset = committedOffset.offset();
- for (Iterator<KafkaSpoutMessageId> iterator = ackedMsgs.iterator(); iterator.hasNext(); ) {
- if (iterator.next().offset() <= committedOffset.offset()) {
- iterator.remove();
- } else {
- break;
- }
- }
- numUncommittedOffsets-= numCommittedOffsets;
- LOG.debug("Committed offsets [{}-{} = {}] for topic-partition [{}]. [{}] uncommitted offsets across all topic partitions",
- oldCommittedOffset + 1, this.committedOffset, numCommittedOffsets, tp, numUncommittedOffsets);
- } else {
- LOG.debug("Committed [{}] offsets for topic-partition [{}]. [{}] uncommitted offsets across all topic partitions",
- numCommittedOffsets, tp, numUncommittedOffsets);
- }
- LOG.trace("{}", this);
- }
-
- long getCommittedOffset() {
- return committedOffset;
- }
-
- public boolean isEmpty() {
- return ackedMsgs.isEmpty();
- }
- public boolean contains(ConsumerRecord record) {
- return contains(new KafkaSpoutMessageId(record));
- }
-
- public boolean contains(KafkaSpoutMessageId msgId) {
- return ackedMsgs.contains(msgId);
- }
- @Override
- public String toString() {
- return "OffsetEntry{" +
- "topic-partition=" + tp +
- ", fetchOffset=" + initialFetchOffset +
- ", committedOffset=" + committedOffset +
- ", ackedMsgs=" + ackedMsgs +
- '}';
- }
- }
-}
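
The net effect of the KafkaSpout changes above: the per-partition commit bookkeeping moves out of the spout's inner OffsetEntry class into the new public OffsetManager, and commit() now returns how many offsets it covered so the spout can maintain numUncommittedOffsets itself. A condensed sketch of the resulting commit flow; names mirror the patch, but this is an illustration, not the literal spout code:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.storm.kafka.spout.internal.OffsetManager;

    class CommitFlowSketch {
        private final Map<TopicPartition, OffsetManager> acked = new HashMap<>();
        private long numUncommittedOffsets;

        void commitOffsetsForAckedTuples(KafkaConsumer<?, ?> consumer) {
            // Pass 1: ask each OffsetManager for the next contiguous offset to commit
            final Map<TopicPartition, OffsetAndMetadata> nextCommitOffsets = new HashMap<>();
            for (Map.Entry<TopicPartition, OffsetManager> e : acked.entrySet()) {
                final OffsetAndMetadata next = e.getValue().findNextCommitOffset();
                if (next != null) {
                    nextCommitOffsets.put(e.getKey(), next);
                }
            }
            if (nextCommitOffsets.isEmpty()) {
                return;
            }
            // One network call commits all partitions at once
            consumer.commitSync(nextCommitOffsets);
            // Pass 2: update only the partitions that were committed. commit()
            // returns the number of offsets it covered, so the spout, not the
            // manager, keeps the global uncommitted counter.
            for (Map.Entry<TopicPartition, OffsetAndMetadata> e : nextCommitOffsets.entrySet()) {
                numUncommittedOffsets -= acked.get(e.getKey()).commit(e.getValue());
            }
        }
    }
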
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/OffsetManager.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/OffsetManager.java
new file mode 100755
index 00000000000..4ce04718f27
--- /dev/null
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/OffsetManager.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.spout.internal;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.storm.kafka.spout.KafkaSpoutMessageId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Manages acked and committed offsets for a TopicPartition. This class is not thread-safe.
+ */
+public class OffsetManager {
+
+ private static final Comparator<KafkaSpoutMessageId> OFFSET_COMPARATOR = new OffsetComparator();
+ private static final Logger LOG = LoggerFactory.getLogger(OffsetManager.class);
+ private final TopicPartition tp;
+ /* First offset to be fetched. It is either set to the beginning, end, or to the first uncommitted offset.
+ * Initial value depends on offset strategy. See KafkaSpoutConsumerRebalanceListener */
+ private final long initialFetchOffset;
+ // Last offset committed to Kafka. Initially it is set to fetchOffset - 1
+ private long committedOffset;
+ // Acked messages sorted by ascending order of offset
+ private final NavigableSet<KafkaSpoutMessageId> ackedMsgs = new TreeSet<>(OFFSET_COMPARATOR);
+
+ public OffsetManager(TopicPartition tp, long initialFetchOffset) {
+ this.tp = tp;
+ this.initialFetchOffset = initialFetchOffset;
+ this.committedOffset = initialFetchOffset - 1;
+ LOG.debug("Instantiated {}", this);
+ }
+
+ public void add(KafkaSpoutMessageId msgId) { // O(Log N)
+ ackedMsgs.add(msgId);
+ }
+
+ /**
+ * An offset is only committed when all records with lower offset have been
+ * acked. This guarantees that all offsets smaller than the committedOffset
+ * have been delivered.
+ *
+ * @return the next OffsetAndMetadata to commit, or null if no offset is
+ * ready to commit.
+ */
+ public OffsetAndMetadata findNextCommitOffset() {
+ boolean found = false;
+ long currOffset;
+ long nextCommitOffset = committedOffset;
+ KafkaSpoutMessageId nextCommitMsg = null; // this is a convenience variable to make it faster to create OffsetAndMetadata
+
+ for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) { // complexity is that of a linear scan on a TreeSet
+ if ((currOffset = currAckedMsg.offset()) == nextCommitOffset + 1) { // found the next offset to commit
+ found = true;
+ nextCommitMsg = currAckedMsg;
+ nextCommitOffset = currOffset;
+ } else if (currAckedMsg.offset() > nextCommitOffset + 1) { // offset found is not continuous to the offsets listed to go in the next commit, so stop search
+ LOG.debug("topic-partition [{}] has non-continuous offset [{}]. It will be processed in a subsequent batch.", tp, currOffset);
+ break;
+ } else {
+ //Received a redundant ack. Ignore and continue processing.
+ LOG.warn("topic-partition [{}] has unexpected offset [{}]. Current committed Offset [{}]",
+ tp, currOffset, committedOffset);
+ }
+ }
+
+ OffsetAndMetadata nextCommitOffsetAndMetadata = null;
+ if (found) {
+ nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, nextCommitMsg.getMetadata(Thread.currentThread()));
+ LOG.debug("topic-partition [{}] has offsets [{}-{}] ready to be committed", tp, committedOffset + 1, nextCommitOffsetAndMetadata.offset());
+ } else {
+ LOG.debug("topic-partition [{}] has NO offsets ready to be committed", tp);
+ }
+ LOG.trace("{}", this);
+ return nextCommitOffsetAndMetadata;
+ }
+
+ /**
+ * Marks an offset as committed. This method has side effects - it sets the
+ * internal state in such a way that future calls to
+ * {@link #findNextCommitOffset()} will return offsets greater than the
+ * offset specified, if any.
+ *
+ * @param committedOffset offset to be marked as committed
+ * @return Number of offsets committed in this commit
+ */
+ public long commit(OffsetAndMetadata committedOffset) {
+ long preCommitCommittedOffsets = this.committedOffset;
+ long numCommittedOffsets = committedOffset.offset() - this.committedOffset;
+ this.committedOffset = committedOffset.offset();
+ for (Iterator<KafkaSpoutMessageId> iterator = ackedMsgs.iterator(); iterator.hasNext();) {
+ if (iterator.next().offset() <= committedOffset.offset()) {
+ iterator.remove();
+ } else {
+ break;
+ }
+ }
+ LOG.trace("{}", this);
+
+ LOG.debug("Committed offsets [{}-{} = {}] for topic-partition [{}].",
+ preCommitCommittedOffsets + 1, this.committedOffset, numCommittedOffsets, tp);
+
+ return numCommittedOffsets;
+ }
+
+ public long getCommittedOffset() {
+ return committedOffset;
+ }
+
+ public boolean isEmpty() {
+ return ackedMsgs.isEmpty();
+ }
+
+ public boolean contains(ConsumerRecord record) {
+ return contains(new KafkaSpoutMessageId(record));
+ }
+
+ public boolean contains(KafkaSpoutMessageId msgId) {
+ return ackedMsgs.contains(msgId);
+ }
+
+ @Override
+ public String toString() {
+ return "OffsetManager{"
+ + "topic-partition=" + tp
+ + ", fetchOffset=" + initialFetchOffset
+ + ", committedOffset=" + committedOffset
+ + ", ackedMsgs=" + ackedMsgs
+ + '}';
+ }
+
+ private static class OffsetComparator implements Comparator<KafkaSpoutMessageId> {
+
+ @Override
+ public int compare(KafkaSpoutMessageId m1, KafkaSpoutMessageId m2) {
+ return m1.offset() < m2.offset() ? -1 : m1.offset() == m2.offset() ? 0 : 1;
+ }
+ }
+}
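
To make the contiguity rule implemented by findNextCommitOffset() and commit() concrete, here is a hypothetical walkthrough. It assumes a KafkaSpoutMessageId(TopicPartition, long) constructor; the expected values follow directly from the logic above:

    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.storm.kafka.spout.KafkaSpoutMessageId;
    import org.apache.storm.kafka.spout.internal.OffsetManager;

    public class OffsetManagerWalkthrough {
        public static void main(String[] args) {
            TopicPartition tp = new TopicPartition("topic", 0);
            OffsetManager manager = new OffsetManager(tp, 5); // committedOffset starts at 4

            manager.add(new KafkaSpoutMessageId(tp, 5));
            manager.add(new KafkaSpoutMessageId(tp, 6));
            manager.add(new KafkaSpoutMessageId(tp, 8)); // gap at 7

            OffsetAndMetadata next = manager.findNextCommitOffset();
            System.out.println(next.offset());                // 6: offsets 5-6 are contiguous from 4
            long committed = manager.commit(next);
            System.out.println(committed);                    // 2: this commit covered offsets 5 and 6
            System.out.println(manager.getCommittedOffset()); // 6: offset 8 stays acked, waiting for 7
        }
    }
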
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/Timer.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/Timer.java
index d51104df82f..2a2e1cb69c1 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/Timer.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/Timer.java
@@ -18,6 +18,7 @@
package org.apache.storm.kafka.spout.internal;
import java.util.concurrent.TimeUnit;
+import org.apache.storm.utils.Time;
public class Timer {
private final long delay;
@@ -41,7 +42,7 @@ public Timer(long delay, long period, TimeUnit timeUnit) {
this.timeUnit = timeUnit;
periodNanos = timeUnit.toNanos(period);
- start = System.nanoTime() + timeUnit.toNanos(delay);
+ start = Time.nanoTime() + timeUnit.toNanos(delay);
}
public long period() {
@@ -65,9 +66,9 @@ public TimeUnit getTimeUnit() {
* otherwise.
*/
public boolean isExpiredResetOnTrue() {
- final boolean expired = System.nanoTime() - start > periodNanos;
+ final boolean expired = Time.nanoTime() - start >= periodNanos;
if (expired) {
- start = System.nanoTime();
+ start = Time.nanoTime();
}
return expired;
}
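
The Timer change swaps System.nanoTime() for Storm's Time facade, which is the hook that lets tests drive the clock deterministically (note the SimulatedTime import added to KafkaSpoutRebalanceTest below). The switch from > to >= also makes a timer fire when exactly one period has elapsed, which matters when tests advance time in precise increments. A sketch, under the assumption that Time.advanceTime(ms) advances the simulated clock as in Storm's test utilities:

    import java.util.concurrent.TimeUnit;

    import org.apache.storm.kafka.spout.internal.Timer;
    import org.apache.storm.utils.Time;
    import org.apache.storm.utils.Time.SimulatedTime;

    public class TimerSimulatedTimeSketch {
        public static void main(String[] args) throws Exception {
            try (SimulatedTime ignored = new SimulatedTime()) {
                Timer timer = new Timer(500, 1_000, TimeUnit.MILLISECONDS);
                System.out.println(timer.isExpiredResetOnTrue()); // false: no simulated time has passed
                Time.advanceTime(1_500);
                // Elapsed since start = 1500 - 500 (delay) = 1000ms >= 1000ms period,
                // so the timer fires and resets. With the old strict '>' comparison,
                // advancing by exactly the period would not have fired it.
                System.out.println(timer.isExpiredResetOnTrue()); // true
                System.out.println(timer.isExpiredResetOnTrue()); // false again after the reset
            }
        }
    }
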
diff --git a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/ByTopicRecordTranslatorTest.java b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/ByTopicRecordTranslatorTest.java
index ea0b6e73f37..abc58f0e8e5 100644
--- a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/ByTopicRecordTranslatorTest.java
+++ b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/ByTopicRecordTranslatorTest.java
@@ -17,7 +17,7 @@
*/
package org.apache.storm.kafka.spout;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashSet;
diff --git a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/DefaultRecordTranslatorTest.java b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/DefaultRecordTranslatorTest.java
index f4275e49d10..681953d1f77 100644
--- a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/DefaultRecordTranslatorTest.java
+++ b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/DefaultRecordTranslatorTest.java
@@ -17,7 +17,7 @@
*/
package org.apache.storm.kafka.spout;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
import java.util.Arrays;
diff --git a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutConfigTest.java b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutConfigTest.java
index 08220dd207e..57e01205d69 100644
--- a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutConfigTest.java
+++ b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutConfigTest.java
@@ -17,7 +17,9 @@
*/
package org.apache.storm.kafka.spout;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import java.util.HashMap;
diff --git a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutRebalanceTest.java b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutRebalanceTest.java
index 9969d84eada..6a0a63ed97a 100644
--- a/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutRebalanceTest.java
+++ b/external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutRebalanceTest.java
@@ -21,10 +21,10 @@
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.anyCollection;
import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -44,6 +44,8 @@
import org.apache.storm.kafka.spout.internal.KafkaConsumerFactory;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
+import org.apache.storm.utils.Time;
+import org.apache.storm.utils.Time.SimulatedTime;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@@ -56,18 +58,16 @@ public class KafkaSpoutRebalanceTest {
@Captor
private ArgumentCaptor