diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java b/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
index 3ab73c5bdec8..9d076f75008e 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.hive.common;
-import com.google.common.base.Objects;
-
import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hive.common.guava.Objects;
+
/**
* JVM and logging related metrics info instances. Ported from Hadoop JvmMetricsInfo.
*/
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
index cf080e3fbe7a..63b245781b01 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
@@ -19,7 +19,6 @@
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -29,6 +28,7 @@
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.util.Daemon;
+import org.apache.hive.common.guava.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/common/src/java/org/apache/hive/common/guava/Objects.java b/common/src/java/org/apache/hive/common/guava/Objects.java
new file mode 100644
index 000000000000..73c07cbf4053
--- /dev/null
+++ b/common/src/java/org/apache/hive/common/guava/Objects.java
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.common.guava;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.common.annotations.GwtCompatible;
+
+import java.util.Arrays;
+
+import javax.annotation.Nullable;
+
+/**
+ * Helper functions that can operate on any {@code Object}.
+ *
+ * <p>See the Guava User Guide on writing
+ * {@code Object} methods with {@code Objects}.
+ *
+ * @author Laurence Gonsalves
+ * @since 2.0 (imported from Google Collections Library)
+ *
+ * This code is from Guava's 14.0.1 source code, because there is no compatible way to
+ * use this functionality in both a Guava 14 ~ 20 environment and a Guava >21 environment.
+ */
+@GwtCompatible
+public final class Objects {
+ private Objects() {}
+
+ /**
+ * Determines whether two possibly-null objects are equal. Returns:
+ *
+ * <ul>
+ * <li>{@code true} if {@code a} and {@code b} are both null.
+ * <li>{@code true} if {@code a} and {@code b} are both non-null and they are
+ * equal according to {@link Object#equals(Object)}.
+ * <li>{@code false} in all other situations.
+ * </ul>
+ *
+ * <p>This assumes that any non-null objects passed to this function conform
+ * to the {@code equals()} contract.
+ */
+ public static boolean equal(@Nullable Object a, @Nullable Object b) {
+ return a == b || (a != null && a.equals(b));
+ }
+
+ /**
+ * Generates a hash code for multiple values. The hash code is generated by
+ * calling {@link Arrays#hashCode(Object[])}.
+ *
+ * <p>This is useful for implementing {@link Object#hashCode()}. For example,
+ * in an object that has three properties, {@code x}, {@code y}, and
+ * {@code z}, one could write:
+ * <pre>
+ * public int hashCode() {
+ *   return Objects.hashCode(getX(), getY(), getZ());
+ * }</pre>
+ *
+ * <p>Warning: When a single object is supplied, the returned hash code
+ * does not equal the hash code of that object.
+ */
+ public static int hashCode(@Nullable Object... objects) {
+ return Arrays.hashCode(objects);
+ }
+
+ /**
+ * Creates an instance of {@link ToStringHelper}.
+ *
+ * <p>This is helpful for implementing {@link Object#toString()}.
+ * Specification by example: <pre>   {@code
+ * // Returns "ClassName{}"
+ * Objects.toStringHelper(this)
+ * .toString();
+ *
+ * // Returns "ClassName{x=1}"
+ * Objects.toStringHelper(this)
+ * .add("x", 1)
+ * .toString();
+ *
+ * // Returns "MyObject{x=1}"
+ * Objects.toStringHelper("MyObject")
+ * .add("x", 1)
+ * .toString();
+ *
+ * // Returns "ClassName{x=1, y=foo}"
+ * Objects.toStringHelper(this)
+ * .add("x", 1)
+ * .add("y", "foo")
+ * .toString();
+ * }}</pre>
+ *
+ * <pre>   {@code
+ * // Returns "ClassName{x=1}"
+ * Objects.toStringHelper(this)
+ * .omitNullValues()
+ * .add("x", 1)
+ * .add("y", null)
+ * .toString();
+ * }}</pre>
+ *
+ * <p>Note that in GWT, class names are often obfuscated.
+ *
+ * @param self the object to generate the string for (typically {@code this}),
+ * used only for its class name
+ * @since 2.0
+ */
+ public static ToStringHelper toStringHelper(Object self) {
+ return new ToStringHelper(simpleName(self.getClass()));
+ }
+
+ /**
+ * Creates an instance of {@link ToStringHelper} in the same manner as
+ * {@link Objects#toStringHelper(Object)}, but using the name of {@code clazz}
+ * instead of using an instance's {@link Object#getClass()}.
+ *
+ * <p>Note that in GWT, class names are often obfuscated.
+ *
+ * @param clazz the {@link Class} of the instance
+ * @since 7.0 (source-compatible since 2.0)
+ */
+ public static ToStringHelper toStringHelper(Class<?> clazz) {
+ return new ToStringHelper(simpleName(clazz));
+ }
+
+ /**
+ * Creates an instance of {@link ToStringHelper} in the same manner as
+ * {@link Objects#toStringHelper(Object)}, but using {@code className} instead
+ * of using an instance's {@link Object#getClass()}.
+ *
+ * @param className the name of the instance type
+ * @since 7.0 (source-compatible since 2.0)
+ */
+ public static ToStringHelper toStringHelper(String className) {
+ return new ToStringHelper(className);
+ }
+
+ /**
+ * {@link Class#getSimpleName()} is not GWT compatible yet, so we
+ * provide our own implementation.
+ */
+ private static String simpleName(Class<?> clazz) {
+ String name = clazz.getName();
+
+ // the nth anonymous class has a class name ending in "Outer$n"
+ // and local inner classes have names ending in "Outer.$1Inner"
+ name = name.replaceAll("\\$[0-9]+", "\\$");
+
+ // we want the name of the inner class all by its lonesome
+ int start = name.lastIndexOf('$');
+
+ // if this isn't an inner class, just find the start of the
+ // top level class name.
+ if (start == -1) {
+ start = name.lastIndexOf('.');
+ }
+ return name.substring(start + 1);
+ }
+
+ /**
+ * Returns the first of two given parameters that is not {@code null}, if
+ * either is, or otherwise throws a {@link NullPointerException}.
+ *
+ * <p>Note: if {@code first} is represented as an {@code Optional},
+ * this can be accomplished with {@code first.or(second)}. That approach also
+ * allows for lazy evaluation of the fallback instance, using
+ * {@code first.or(Supplier)}.
+ *
+ * @return {@code first} if {@code first} is not {@code null}, or
+ * {@code second} if {@code first} is {@code null} and {@code second} is
+ * not {@code null}
+ * @throws NullPointerException if both {@code first} and {@code second} were
+ * {@code null}
+ * @since 3.0
+ */
+ public static <T> T firstNonNull(@Nullable T first, @Nullable T second) {
+ return first != null ? first : checkNotNull(second);
+ }
+
+ /**
+ * Support class for {@link Objects#toStringHelper}.
+ *
+ * @author Jason Lee
+ * @since 2.0
+ */
+ public static final class ToStringHelper {
+ private final String className;
+ private ValueHolder holderHead = new ValueHolder();
+ private ValueHolder holderTail = holderHead;
+ private boolean omitNullValues = false;
+
+ /**
+ * Use {@link Objects#toStringHelper(Object)} to create an instance.
+ */
+ private ToStringHelper(String className) {
+ this.className = checkNotNull(className);
+ }
+
+ /**
+ * Configures the {@link ToStringHelper} so {@link #toString()} will ignore
+ * properties with null value. The order of calling this method, relative
+ * to the {@code add()}/{@code addValue()} methods, is not significant.
+ *
+ * @since 12.0
+ */
+ public ToStringHelper omitNullValues() {
+ omitNullValues = true;
+ return this;
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format. If {@code value} is {@code null}, the string {@code "null"}
+ * is used, unless {@link #omitNullValues()} is called, in which case this
+ * name/value pair will not be added.
+ */
+ public ToStringHelper add(String name, @Nullable Object value) {
+ return addHolder(name, value);
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, boolean value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, char value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, double value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, float value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, int value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds a name/value pair to the formatted output in {@code name=value}
+ * format.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper add(String name, long value) {
+ return addHolder(name, String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * It is strongly encouraged to use {@link #add(String, Object)} instead
+ * and give value a readable name.
+ */
+ public ToStringHelper addValue(@Nullable Object value) {
+ return addHolder(value);
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, boolean)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(boolean value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, char)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(char value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, double)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(double value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, float)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(float value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, int)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(int value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Adds an unnamed value to the formatted output.
+ *
+ * <p>It is strongly encouraged to use {@link #add(String, long)} instead
+ * and give value a readable name.
+ *
+ * @since 11.0 (source-compatible since 2.0)
+ */
+ public ToStringHelper addValue(long value) {
+ return addHolder(String.valueOf(value));
+ }
+
+ /**
+ * Returns a string in the format specified by {@link
+ * Objects#toStringHelper(Object)}.
+ *
+ * <p>After calling this method, you can keep adding more properties to later
+ * call toString() again and get a more complete representation of the
+ * same object; but properties cannot be removed, so this only allows
+ * limited reuse of the helper instance. The helper allows duplication of
+ * properties (multiple name/value pairs with the same name can be added).
+ */
+ @Override public String toString() {
+ // create a copy to keep it consistent in case value changes
+ boolean omitNullValuesSnapshot = omitNullValues;
+ String nextSeparator = "";
+ StringBuilder builder = new StringBuilder(32).append(className)
+ .append('{');
+ for (ValueHolder valueHolder = holderHead.next; valueHolder != null;
+ valueHolder = valueHolder.next) {
+ if (!omitNullValuesSnapshot || valueHolder.value != null) {
+ builder.append(nextSeparator);
+ nextSeparator = ", ";
+
+ if (valueHolder.name != null) {
+ builder.append(valueHolder.name).append('=');
+ }
+ builder.append(valueHolder.value);
+ }
+ }
+ return builder.append('}').toString();
+ }
+
+ private ValueHolder addHolder() {
+ ValueHolder valueHolder = new ValueHolder();
+ holderTail = holderTail.next = valueHolder;
+ return valueHolder;
+ }
+
+ private ToStringHelper addHolder(@Nullable Object value) {
+ ValueHolder valueHolder = addHolder();
+ valueHolder.value = value;
+ return this;
+ }
+
+ private ToStringHelper addHolder(String name, @Nullable Object value) {
+ ValueHolder valueHolder = addHolder();
+ valueHolder.value = value;
+ valueHolder.name = checkNotNull(name);
+ return this;
+ }
+
+ private static final class ValueHolder {
+ String name;
+ Object value;
+ ValueHolder next;
+ }
+ }
+}
diff --git a/common/src/java/org/apache/hive/common/guava/SameThreadExecutorUtil.java b/common/src/java/org/apache/hive/common/guava/SameThreadExecutorUtil.java
new file mode 100644
index 000000000000..48a656e076d3
--- /dev/null
+++ b/common/src/java/org/apache/hive/common/guava/SameThreadExecutorUtil.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.common.guava;
+
+import com.google.common.util.concurrent.*;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Factory and utility methods for {@link java.util.concurrent.Executor}, {@link
+ * ExecutorService}, and {@link ThreadFactory}.
+ *
+ * @author Eric Fellheimer
+ * @author Kyle Littlefield
+ * @author Justin Mahoney
+ * @since 3.0
+ *
+ * This code is from Guava's 14.0 source code, because there is no compatible way to
+ * use this functionality in both a Guava 14 ~ 25 environment and a Guava >26 environment.
+ */
+public final class SameThreadExecutorUtil {
+ private SameThreadExecutorUtil() {}
+
+ /**
+ * Creates an executor service that runs each task in the thread
+ * that invokes {@code execute/submit}, as in {@link CallerRunsPolicy} This
+ * applies both to individually submitted tasks and to collections of tasks
+ * submitted via {@code invokeAll} or {@code invokeAny}. In the latter case,
+ * tasks will run serially on the calling thread. Tasks are run to
+ * completion before a {@code Future} is returned to the caller (unless the
+ * executor has been shutdown).
+ *
+ * <p>Although all tasks are immediately executed in the thread that
+ * submitted the task, this {@code ExecutorService} imposes a small
+ * locking overhead on each task submission in order to implement shutdown
+ * and termination behavior.
+ *
+ * <p>The implementation deviates from the {@code ExecutorService}
+ * specification with regards to the {@code shutdownNow} method. First,
+ * "best-effort" with regards to canceling running tasks is implemented
+ * as "no-effort". No interrupts or other attempts are made to stop
+ * threads executing tasks. Second, the returned list will always be empty,
+ * as any submitted task is considered to have started execution.
+ * This applies also to tasks given to {@code invokeAll} or {@code invokeAny}
+ * which are pending serial execution, even the subset of the tasks that
+ * have not yet started execution. It is unclear from the
+ * {@code ExecutorService} specification if these should be included, and
+ * it's much easier to implement the interpretation that they not be.
+ * Finally, a call to {@code shutdown} or {@code shutdownNow} may result
+ * in concurrent calls to {@code invokeAll/invokeAny} throwing
+ * RejectedExecutionException, although a subset of the tasks may already
+ * have been executed.
+ *
+ * @since 10.0 (mostly source-compatible since 3.0)
+ */
+ public static ListeningExecutorService sameThreadExecutor() {
+ return new SameThreadExecutorService();
+ }
+
+ // See sameThreadExecutor javadoc for behavioral notes.
+ private static class SameThreadExecutorService
+ extends AbstractListeningExecutorService {
+ /**
+ * Lock used whenever accessing the state variables
+ * (runningTasks, shutdown, terminationCondition) of the executor
+ */
+ private final Lock lock = new ReentrantLock();
+
+ /** Signaled after the executor is shutdown and running tasks are done */
+ private final Condition termination = lock.newCondition();
+
+ /*
+ * Conceptually, these two variables describe the executor being in
+ * one of three states:
+ * - Active: shutdown == false
+ * - Shutdown: runningTasks > 0 and shutdown == true
+ * - Terminated: runningTasks == 0 and shutdown == true
+ */
+ private int runningTasks = 0;
+ private boolean shutdown = false;
+
+ @Override
+ public void execute(Runnable command) {
+ startTask();
+ try {
+ command.run();
+ } finally {
+ endTask();
+ }
+ }
+
+ @Override
+ public boolean isShutdown() {
+ lock.lock();
+ try {
+ return shutdown;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void shutdown() {
+ lock.lock();
+ try {
+ shutdown = true;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ // See sameThreadExecutor javadoc for unusual behavior of this method.
+ @Override
+ public List<Runnable> shutdownNow() {
+ shutdown();
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean isTerminated() {
+ lock.lock();
+ try {
+ return shutdown && runningTasks == 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public boolean awaitTermination(long timeout, TimeUnit unit)
+ throws InterruptedException {
+ long nanos = unit.toNanos(timeout);
+ lock.lock();
+ try {
+ for (;;) {
+ if (isTerminated()) {
+ return true;
+ } else if (nanos <= 0) {
+ return false;
+ } else {
+ nanos = termination.awaitNanos(nanos);
+ }
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Checks if the executor has been shut down and increments the running
+ * task count.
+ *
+ * @throws RejectedExecutionException if the executor has been previously
+ * shutdown
+ */
+ private void startTask() {
+ lock.lock();
+ try {
+ if (isShutdown()) {
+ throw new RejectedExecutionException("Executor already shutdown");
+ }
+ runningTasks++;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Decrements the running task count.
+ */
+ private void endTask() {
+ lock.lock();
+ try {
+ runningTasks--;
+ if (isTerminated()) {
+ termination.signalAll();
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+}
diff --git a/common/src/java/org/apache/hive/common/guava/Stopwatch.java b/common/src/java/org/apache/hive/common/guava/Stopwatch.java
new file mode 100644
index 000000000000..20324c1cf151
--- /dev/null
+++ b/common/src/java/org/apache/hive/common/guava/Stopwatch.java
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2008 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.common.guava;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.GwtCompatible;
+import com.google.common.annotations.GwtIncompatible;
+import com.google.common.base.Ticker;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An object that measures elapsed time in nanoseconds. It is useful to measure
+ * elapsed time using this class instead of direct calls to {@link
+ * System#nanoTime} for a few reasons:
+ *
+ * <ul>
+ * <li>An alternate time source can be substituted, for testing or performance
+ * reasons.
+ * <li>As documented by {@code nanoTime}, the value returned has no absolute
+ * meaning, and can only be interpreted as relative to another timestamp
+ * returned by {@code nanoTime} at a different time. {@code Stopwatch} is a
+ * more effective abstraction because it exposes only these relative values,
+ * not the absolute ones.
+ * </ul>
+ *
+ * <p>Basic usage:
+ * <pre>
+ * Stopwatch stopwatch = new Stopwatch().{@link #start start}();
+ * doSomething();
+ * stopwatch.{@link #stop stop}(); // optional
+ *
+ * long millis = stopwatch.elapsed(MILLISECONDS);
+ *
+ * log.info("that took: " + stopwatch); // formatted string like "12.3 ms"
+ * </pre>
+ *
+ * <p>Stopwatch methods are not idempotent; it is an error to start or stop a
+ * stopwatch that is already in the desired state.
+ *
+ * <p>When testing code that uses this class, use the {@linkplain
+ * #Stopwatch(Ticker) alternate constructor} to supply a fake or mock ticker.
+ * This allows you to
+ * simulate any valid behavior of the stopwatch.
+ *
+ * <p>Note: This class is not thread-safe.
+ *
+ * @author Kevin Bourrillion
+ * @since 10.0
+ *
+ * This code is from Guava's 14.0.1 source code, because there is no compatible way to
+ * use this functionality in both a Guava 14 ~ 16 environment and a Guava >17 environment.
+ */
+@Beta
+@GwtCompatible(emulated = true)
+public final class Stopwatch {
+ private final Ticker ticker;
+ private boolean isRunning;
+ private long elapsedNanos;
+ private long startTick;
+
+ /**
+ * Creates (but does not start) a new stopwatch using {@link System#nanoTime}
+ * as its time source.
+ */
+ public Stopwatch() {
+ this(Ticker.systemTicker());
+ }
+
+ /**
+ * Creates (but does not start) a new stopwatch, using the specified time
+ * source.
+ */
+ public Stopwatch(Ticker ticker) {
+ this.ticker = checkNotNull(ticker, "ticker");
+ }
+
+ /**
+ * Returns {@code true} if {@link #start()} has been called on this stopwatch,
+ * and {@link #stop()} has not been called since the last call to {@code
+ * start()}.
+ */
+ public boolean isRunning() {
+ return isRunning;
+ }
+
+ /**
+ * Starts the stopwatch.
+ *
+ * @return this {@code Stopwatch} instance
+ * @throws IllegalStateException if the stopwatch is already running.
+ */
+ public Stopwatch start() {
+ checkState(!isRunning,
+ "This stopwatch is already running; it cannot be started more than once.");
+ isRunning = true;
+ startTick = ticker.read();
+ return this;
+ }
+
+ /**
+ * Stops the stopwatch. Future reads will return the fixed duration that had
+ * elapsed up to this point.
+ *
+ * @return this {@code Stopwatch} instance
+ * @throws IllegalStateException if the stopwatch is already stopped.
+ */
+ public Stopwatch stop() {
+ long tick = ticker.read();
+ checkState(isRunning,
+ "This stopwatch is already stopped; it cannot be stopped more than once.");
+ isRunning = false;
+ elapsedNanos += tick - startTick;
+ return this;
+ }
+
+ /**
+ * Sets the elapsed time for this stopwatch to zero,
+ * and places it in a stopped state.
+ *
+ * @return this {@code Stopwatch} instance
+ */
+ public Stopwatch reset() {
+ elapsedNanos = 0;
+ isRunning = false;
+ return this;
+ }
+
+ private long elapsedNanos() {
+ return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos;
+ }
+
+ /**
+ * Returns the current elapsed time shown on this stopwatch, expressed
+ * in the desired time unit, with any fraction rounded down.
+ *
+ * <p>Note that the overhead of measurement can be more than a microsecond, so
+ * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
+ * precision here.
+ *
+ * @since 14.0 (since 10.0 as {@code elapsedTime()})
+ */
+ public long elapsed(TimeUnit desiredUnit) {
+ return desiredUnit.convert(elapsedNanos(), NANOSECONDS);
+ }
+
+ /**
+ * Returns the current elapsed time shown on this stopwatch, expressed
+ * in the desired time unit, with any fraction rounded down.
+ *
+ * <p>Note that the overhead of measurement can be more than a microsecond, so
+ * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
+ * precision here.
+ *
+ * @deprecated Use {@link Stopwatch#elapsed(TimeUnit)} instead. This method is
+ * scheduled to be removed in Guava release 16.0.
+ */
+ @Deprecated
+ public long elapsedTime(TimeUnit desiredUnit) {
+ return elapsed(desiredUnit);
+ }
+
+ /**
+ * Returns the current elapsed time shown on this stopwatch, expressed
+ * in milliseconds, with any fraction rounded down. This is identical to
+ * {@code elapsed(TimeUnit.MILLISECONDS)}.
+ *
+ * @deprecated Use {@code stopwatch.elapsed(MILLISECONDS)} instead. This
+ * method is scheduled to be removed in Guava release 16.0.
+ */
+ @Deprecated
+ public long elapsedMillis() {
+ return elapsed(MILLISECONDS);
+ }
+
+ /**
+ * Returns a string representation of the current elapsed time.
+ */
+ @GwtIncompatible("String.format()")
+ @Override public String toString() {
+ return toString(4);
+ }
+
+ /**
+ * Returns a string representation of the current elapsed time, choosing an
+ * appropriate unit and using the specified number of significant figures.
+ * For example, at the instant when {@code elapsed(NANOSECONDS)} would
+ * return {1234567}, {@code toString(4)} returns {@code "1.235 ms"}.
+ *
+ * @deprecated Use {@link #toString()} instead. This method is scheduled
+ * to be removed in Guava release 15.0.
+ */
+ @Deprecated
+ @GwtIncompatible("String.format()")
+ public String toString(int significantDigits) {
+ long nanos = elapsedNanos();
+
+ TimeUnit unit = chooseUnit(nanos);
+ double value = (double) nanos / NANOSECONDS.convert(1, unit);
+
+ // Too bad this functionality is not exposed as a regular method call
+ return String.format("%." + significantDigits + "g %s",
+ value, abbreviate(unit));
+ }
+
+ private static TimeUnit chooseUnit(long nanos) {
+ if (SECONDS.convert(nanos, NANOSECONDS) > 0) {
+ return SECONDS;
+ }
+ if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) {
+ return MILLISECONDS;
+ }
+ if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) {
+ return MICROSECONDS;
+ }
+ return NANOSECONDS;
+ }
+
+ private static String abbreviate(TimeUnit unit) {
+ switch (unit) {
+ case NANOSECONDS:
+ return "ns";
+ case MICROSECONDS:
+ return "\u03bcs"; // μs
+ case MILLISECONDS:
+ return "ms";
+ case SECONDS:
+ return "s";
+ default:
+ throw new AssertionError();
+ }
+ }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
index b9fc09bea3e3..5767ac955ac0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
@@ -86,6 +86,10 @@ public boolean apply(AclEntry input) {
}
return false;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(AclEntry input) {
+ return apply(input);
+ }
});
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
index b153679dc8cd..3aeff8937033 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
@@ -148,6 +148,10 @@ public boolean apply(ConnParamInfo inputParam) {
return inputParam.host.equals(host) && inputParam.port == port &&
inputParam.path.startsWith(pathPrefix);
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(ConnParamInfo input) {
+ return apply(input);
+ }
}
//Mocks HS2 publishing logic.
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
index a735346c0626..1fef1226e543 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
@@ -22,11 +22,11 @@
import java.util.concurrent.TimeUnit;
-import com.google.common.base.Stopwatch;
import org.apache.hadoop.hive.cli.control.AbstractCliConfig.MetastoreType;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.util.ElapsedTimeLoggingWrapper;
+import org.apache.hive.common.guava.Stopwatch;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
index 383fa8c070f7..70ee1ed57366 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
@@ -87,6 +87,10 @@ public boolean apply(@Nullable HivePrivilegeObject hivePrivilegeObject) {
}
return !bypassObjectTypes.contains(hivePrivilegeObject.getType());
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(HivePrivilegeObject input) {
+ return apply(input);
+ }
}));
}
}
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java b/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
index 061a918f994e..7b97d9b49029 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
@@ -20,9 +20,10 @@
import java.util.concurrent.TimeUnit;
-import com.google.common.base.Stopwatch;
import org.slf4j.Logger;
+import org.apache.hive.common.guava.Stopwatch;
+
public abstract class ElapsedTimeLoggingWrapper<T> {
public abstract T invokeInternal() throws Exception;
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
index ce75d722ab09..26ddd68b93b2 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
@@ -65,6 +65,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -121,6 +122,8 @@ public LlapProtocolClientProxy(
@Override
public void serviceStart() {
requestManagerFuture = requestManagerExecutor.submit(requestManager);
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback`
+ // method with `Executor` parameter.
Futures.addCallback(requestManagerFuture, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
@@ -131,7 +134,7 @@ public void onSuccess(Void result) {
public void onFailure(Throwable t) {
LOG.warn("RequestManager shutdown with error", t);
}
- });
+ }, SameThreadExecutorUtil.sameThreadExecutor());
}
@Override
@@ -263,7 +266,9 @@ public void shutdown() {
void submitToExecutor(CallableRequest request, LlapNodeId nodeId) {
ListenableFuture future =
executor.submit(request);
- Futures.addCallback(future, new ResponseCallback(request.getCallback(), nodeId, this));
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
+ Futures.addCallback(future, new ResponseCallback(request.getCallback(), nodeId, this),
+ SameThreadExecutorUtil.sameThreadExecutor());
}
@VisibleForTesting
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
index b4c62d5a0723..d865afa1f544 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
@@ -67,6 +67,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import org.apache.tez.common.CallableWithNdc;
import org.apache.tez.common.security.JobTokenIdentifier;
import org.apache.tez.dag.records.TezTaskAttemptID;
@@ -161,6 +162,7 @@ public AMReporter(int numExecutors, int maxThreads, AtomicReference() {
@Override
public void onSuccess(Void result) {
@@ -176,7 +178,7 @@ public void onFailure(Throwable t) {
Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
}
}
- });
+ }, SameThreadExecutorUtil.sameThreadExecutor());
// TODO: why is this needed? we could just save the host and port?
nodeId = LlapNodeId.getInstance(localAddress.get().getHostName(), localAddress.get().getPort());
LOG.info("AMReporter running with DaemonId: {}, NodeId: {}", daemonId, nodeId);
@@ -260,6 +262,7 @@ public void taskKilled(String amLocation, int port, String umbilicalUser, Token<
// only happen after the AtomicReference address has been populated. Not adding an additional check.
ListenableFuture<Void> future =
executor.submit(new KillTaskCallable(taskAttemptId, amNodeInfo));
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
@@ -271,7 +274,7 @@ public void onFailure(Throwable t) {
LOG.warn("Failed to send taskKilled for {}. The attempt will likely time out.",
taskAttemptId);
}
- });
+ }, SameThreadExecutorUtil.sameThreadExecutor());
}
public void queryComplete(QueryIdentifier queryIdentifier) {
@@ -337,7 +340,7 @@ public void onFailure(Throwable t) {
amNodeInfo.amNodeId, currentQueryIdentifier, t);
queryFailedHandler.queryFailed(currentQueryIdentifier);
}
- });
+ }, SameThreadExecutorUtil.sameThreadExecutor());
}
}
} catch (InterruptedException e) {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index 3d597029ab77..bad8b5a3ced7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hive.llap.counters.FragmentCountersMap;
import org.apache.hadoop.hive.llap.daemon.SchedulerFragmentCompletingListener;
import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import org.apache.tez.common.counters.TezCounters;
import org.apache.tez.dag.api.TezException;
import org.apache.tez.dag.records.TezTaskAttemptID;
@@ -123,7 +124,8 @@ public synchronized void registerTask(RuntimeTask task,
currentCallable = new HeartbeatCallable(completionListener, task, umbilical, pollInterval, sendCounterInterval,
maxEventsToGet, requestCounter, containerIdStr, initialEvent, fragmentRequestId);
ListenableFuture<Boolean> future = heartbeatExecutor.submit(currentCallable);
- Futures.addCallback(future, new HeartbeatCallback(errorReporter));
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
+ Futures.addCallback(future, new HeartbeatCallback(errorReporter), SameThreadExecutorUtil.sameThreadExecutor());
}
/**
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 70447d94d35a..b5929b2736a7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hive.llap.tezplugins.helpers.MonotonicClock;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import org.apache.tez.runtime.task.EndReason;
import org.apache.tez.runtime.task.TaskRunner2Result;
import org.slf4j.Logger;
@@ -168,7 +169,8 @@ public TaskExecutorService(int numExecutors, int waitQueueSize,
executionCompletionExecutorService = MoreExecutors.listeningDecorator(
executionCompletionExecutorServiceRaw);
ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
- Futures.addCallback(future, new WaitQueueWorkerCallback());
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
+ Futures.addCallback(future, new WaitQueueWorkerCallback(), SameThreadExecutorUtil.sameThreadExecutor());
}
private Comparator createComparator(
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index c3a74afd3d9d..c482f6eb0a3f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.llap.daemon.impl;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Stopwatch;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.util.concurrent.FutureCallback;
@@ -44,6 +43,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hive.common.guava.Stopwatch;
import org.apache.log4j.MDC;
import org.apache.log4j.NDC;
import org.apache.tez.common.CallableWithNdc;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
index 427a0b1c19c1..acc41e5cb58b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.Objects;
+import org.apache.hive.common.guava.Objects;
/**
* Metrics information for llap cache.
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
index 69d1c6fff14f..334ab370e64e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.Objects;
+import org.apache.hive.common.guava.Objects;
/**
* Metrics information for llap daemon container.
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
index f0fde62a6ad6..40351ef83fd0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.Objects;
+import org.apache.hive.common.guava.Objects;
/**
* Llap daemon I/O elevator metrics
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
index efbddaad14ff..14795db58d94 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.Objects;
+import org.apache.hive.common.guava.Objects;
/**
* Llap daemon JVM info. These are some additional metrics that are not exposed via
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
index 6bedccbd184e..42ac52ae31ec 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -81,6 +81,7 @@
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -317,15 +318,21 @@ public Void call() throws Exception {
}, 10000L, TimeUnit.MILLISECONDS);
nodeEnablerFuture = nodeEnabledExecutor.submit(nodeEnablerCallable);
- Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG));
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
+ Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG),
+ SameThreadExecutorUtil.sameThreadExecutor());
delayedTaskSchedulerFuture =
delayedTaskSchedulerExecutor.submit(delayedTaskSchedulerCallable);
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
Futures.addCallback(delayedTaskSchedulerFuture,
- new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG));
+ new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG),
+ SameThreadExecutorUtil.sameThreadExecutor());
schedulerFuture = schedulerExecutor.submit(schedulerCallable);
- Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG));
+ // HIVE-27560: In order to support Guava 26+, need to use the `addCallback` method with `Executor` parameter.
+ Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG),
+ SameThreadExecutorUtil.sameThreadExecutor());
registry.start();
registry.registerStateChangeListener(new NodeStateChangeListener());
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
index c190be86ae66..6b3371a56002 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.Objects;
+import org.apache.hive.common.guava.Objects;
/**
* Metrics information for llap task scheduler.
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
index f66200fcd3cb..899aa446d6ca 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
@@ -36,6 +36,8 @@
import org.apache.twill.discovery.InMemoryDiscoveryService;
import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -61,7 +63,14 @@ public void connect() throws IOException {
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) {
LOG.debug("Using an in memory client transaction system for testing");
TransactionManager txnMgr = new TransactionManager(conf);
- txnMgr.startAndWait();
+ try {
+ // HIVE-27560: In order to support Guava 17+, change to using reflection for method calls.
+ // With Guava 16 and earlier, call `startAndWait`; with Guava 17 and later, call
+ // `startAsync` followed by `awaitRunning`.
+ startAndWait(txnMgr);
+ } catch (InvocationTargetException | IllegalAccessException | NoSuchMethodException e) {
+ throw new RuntimeException("txnMgr start failed", e);
+ }
txnClient = new InMemoryTxSystemClient(txnMgr);
} else {
// TODO should enable use of ZKDiscoveryService if users want it
@@ -124,4 +133,16 @@ public HTableInterface getHBaseTable(String tableName, boolean force) throws IOE
return (TransactionAwareHTable)txnTables.get(tableName);
}
+ private void startAndWait(TransactionManager txnMgr)
+ throws InvocationTargetException, IllegalAccessException, NoSuchMethodException {
+ try {
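+ // Guava 16 and earlier: TransactionManager still exposes startAndWait().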
+ Method startAndWaitMethod = txnMgr.getClass().getMethod("startAndWait");
+ startAndWaitMethod.invoke(txnMgr);
+ } catch (NoSuchMethodException e) {
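+ // Guava 17 and later: startAndWait() was removed, so call startAsync() followed by awaitRunning().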
+ Method startAsyncMethod = txnMgr.getClass().getMethod("startAsync");
+ Method awaitRunningMethod = txnMgr.getClass().getMethod("awaitRunning");
+ startAsyncMethod.invoke(txnMgr);
+ awaitRunningMethod.invoke(txnMgr);
+ }
+ }
}
diff --git a/ql/pom.xml b/ql/pom.xml
index 152a4cf8a9e3..70b45c83553e 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -907,14 +907,6 @@
                  <pattern>org.objenesis</pattern>
                  <shadedPattern>org.apache.hive.org.objenesis</shadedPattern>
                </relocation>
-               <relocation>
-                 <pattern>com.google.common</pattern>
-                 <shadedPattern>org.apache.hive.com.google.common</shadedPattern>
-               </relocation>
-               <relocation>
-                 <pattern>com.google.thirdparty.publicsuffix</pattern>
-                 <shadedPattern>org.apache.hive.com.google.thirdparty.publicsuffix</shadedPattern>
-               </relocation>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 004bb2f60299..46f658363712 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -22,6 +22,7 @@
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -105,7 +106,8 @@ public class FetchOperator implements Serializable {
private transient Iterator<Path> iterPath;
private transient Iterator<PartitionDesc> iterPartDesc;
- private transient Iterator iterSplits = Iterators.emptyIterator();
+ // HIVE-27560: In order to support Guava 20+, change to use JDK API.
+ private transient Iterator iterSplits = Collections.emptyIterator();
private transient Path currPath;
private transient PartitionDesc currDesc;
@@ -540,7 +542,8 @@ public void clearFetchContext() throws HiveException {
this.currPath = null;
this.iterPath = null;
this.iterPartDesc = null;
- this.iterSplits = Iterators.emptyIterator();
+ // HIVE-27560: In order to support Guava 20+, change to use JDK API.
+ this.iterSplits = Collections.emptyIterator();
} catch (Exception e) {
throw new HiveException("Failed with exception " + e.getMessage()
+ StringUtils.stringifyException(e));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index c1f688360cbe..0bda5d27945e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -46,6 +46,7 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
@@ -461,7 +462,9 @@ private void writeVertices(JsonWriter writer, Set vertices) throws IOExc
*/
private String getQueryHash(String queryStr) {
Hasher hasher = Hashing.md5().newHasher();
- hasher.putString(queryStr);
+ // HIVE-27560: In order to support Guava 16+,
+ // need to call `putString` method with `Charset` parameter.
+ hasher.putString(queryStr, StandardCharsets.UTF_8);
return hasher.hash().toString();
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 84c090239df3..46f5d66d1614 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -53,9 +53,9 @@
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
+import org.apache.hive.common.guava.SameThreadExecutorUtil;
import org.apache.thrift.TException;
-import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
@@ -433,7 +433,8 @@ private void checkPartitionDirs(Path basePath, Set allDirs, int maxDepth)
ExecutorService executor;
if (poolSize <= 1) {
LOG.debug("Using single-threaded version of MSCK-GetPaths");
- executor = MoreExecutors.sameThreadExecutor();
+ // HIVE-27560: In order to support Guava 26+, change to use `SameThreadExecutorUtil`.
+ executor = SameThreadExecutorUtil.sameThreadExecutor();
} else {
LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of threads " + poolSize);
ThreadFactory threadFactory =
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
index 93dcc0e0a116..bb87617c524e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
@@ -2416,6 +2416,10 @@ public static class FilterOnProjectRule extends RelOptRule {
public boolean apply(Filter input) {
return input.getCondition() instanceof RexInputRef;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(Filter input) {
+ return this.apply(input);
+ }
};
public static final FilterOnProjectRule INSTANCE =
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 69e157ecd73b..06a4ac2119e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -19,6 +19,7 @@
import java.util.ArrayList;
import java.util.BitSet;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -70,7 +71,6 @@
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -532,7 +532,8 @@ Iterable<Mapping> mappings(final RexNode predicate) {
public Iterator<Mapping> iterator() {
ImmutableBitSet fields = exprFields.get(predicate.toString());
if (fields.cardinality() == 0) {
- return Iterators.emptyIterator();
+ // HIVE-27560: In order to support Guava 20+, change to use JDK API.
+ return Collections.emptyIterator();
}
return new ExprsItr(fields);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 48362a3574dc..e10e5536eef7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -256,6 +256,10 @@ public boolean apply(@Nullable Partition partition) {
}
return (allowEventReplacementInto(partition));
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(Partition input) {
+ return this.apply(input);
+ }
};
}
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index 26c881223f14..55202ccbdb2c 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -137,6 +137,10 @@ public Integer getValue() {
public boolean apply(HiveSession hiveSession) {
return hiveSession.getNoOperationTime() == 0L;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(HiveSession input) {
+ return apply(input);
+ }
});
return Iterables.size(filtered);
}
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 277738fac705..ea2ac0b4ce7b 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -193,6 +193,10 @@ public boolean apply(AclEntry input) {
}
return false;
}
+
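+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.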
+ public boolean test(AclEntry input) {
+ return this.apply(input);
+ }
});
}
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java b/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
index 0f03a6406353..17f82f2e0d2d 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
@@ -104,6 +104,10 @@ public Metrics getTaskMetrics(final int jobId, final int stageId, final long tas
public boolean apply(TaskInfo input) {
return jobId == input.jobId && stageId == input.stageId && taskId == input.taskId;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(TaskInfo input) {
+ return this.apply(input);
+ }
};
lock.readLock().lock();
try {
@@ -257,6 +261,10 @@ private static class JobFilter implements Predicate<TaskInfo> {
public boolean apply(TaskInfo input) {
return jobId == input.jobId;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(TaskInfo input) {
+ return this.apply(input);
+ }
}
@@ -274,6 +282,10 @@ private static class StageFilter implements Predicate<TaskInfo> {
public boolean apply(TaskInfo input) {
return jobId == input.jobId && stageId == input.stageId;
}
+ // HIVE-27560: In order to support Guava 21+, need to add the `test` method.
+ public boolean test(TaskInfo input) {
+ return this.apply(input);
+ }
}