From 85a6e35d834eabef0bdcf9ff5bcf16eea669c828 Mon Sep 17 00:00:00 2001
From: panbingkun
Date: Wed, 8 May 2024 22:44:39 -0700
Subject: [PATCH] [SPARK-48182][SQL] SQL (java side): Migrate `error/warn/info` with variables to structured logging framework

### What changes were proposed in this pull request?
This PR aims to:
1. Migrate `error/warn/info` calls with variables in the `SQL` module to the `structured logging framework` on the Java side.
2. Convert all dependencies on `org.slf4j.Logger & org.slf4j.LoggerFactory` to `org.apache.spark.internal.Logger & org.apache.spark.internal.LoggerFactory`, so that importing `org.slf4j.Logger & org.slf4j.LoggerFactory` in Java code can later be prohibited entirely.

(An illustrative before/after sketch of the logging pattern appears after the diff.)

### Why are the changes needed?
To enhance Apache Spark's logging system by implementing structured logging.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
- Pass GA.

### Was this patch authored or co-authored using generative AI tooling?
No.

Closes #46450 from panbingkun/sql_java_sl.

Authored-by: panbingkun
Signed-off-by: Gengliang Wang
---
 .../org/apache/spark/internal/Logger.java | 4 ++
 .../org/apache/spark/internal/LogKey.scala | 13 +++++
 .../expressions/RowBasedKeyValueBatch.java | 11 +++--
 .../sql/util/CaseInsensitiveStringMap.java | 18 ++++---
 .../apache/hive/service/AbstractService.java | 13 +++--
 .../apache/hive/service/CompositeService.java | 14 ++++--
 .../org/apache/hive/service/CookieSigner.java | 5 +-
 .../hive/service/ServiceOperations.java | 12 +++--
 .../org/apache/hive/service/ServiceUtils.java | 2 +-
 .../hive/service/auth/HiveAuthFactory.java | 15 +++---
 .../hive/service/auth/HttpAuthUtils.java | 12 +++--
 .../service/auth/TSetIpAddressProcessor.java | 7 +--
 .../apache/hive/service/cli/CLIService.java | 21 +++++---
 .../hive/service/cli/ColumnBasedSet.java | 9 ++--
 .../operation/ClassicTableTypeMapping.java | 13 +++--
 .../hive/service/cli/operation/Operation.java | 28 ++++++-----
 .../cli/operation/OperationManager.java | 10 ++--
 .../service/cli/session/HiveSessionImpl.java | 49 +++++++++++--------
 .../service/cli/session/SessionManager.java | 49 +++++++++++--------
 .../service/cli/thrift/ThriftCLIService.java | 16 +++---
 .../service/cli/thrift/ThriftHttpServlet.java | 14 ++++--
 .../hive/service/server/HiveServer2.java | 12 +++--
 .../server/ThreadWithGarbageCleanup.java | 5 +-
 .../thriftserver/SparkSQLCLIService.scala | 2 +-
 24 files changed, 222 insertions(+), 132 deletions(-)
diff --git a/common/utils/src/main/java/org/apache/spark/internal/Logger.java b/common/utils/src/main/java/org/apache/spark/internal/Logger.java index d8ab26424bae5..7c54e912b189a 100644 --- a/common/utils/src/main/java/org/apache/spark/internal/Logger.java +++ b/common/utils/src/main/java/org/apache/spark/internal/Logger.java @@ -193,4 +193,8 @@ static MessageThrowable of(String message, Throwable throwable) { return new MessageThrowable(message, throwable); } } + + public org.slf4j.Logger getSlf4jLogger() { + return slf4jLogger; + } } diff --git a/common/utils/src/main/scala/org/apache/spark/internal/LogKey.scala b/common/utils/src/main/scala/org/apache/spark/internal/LogKey.scala index 14e822c6349f3..78be240619405 100644 --- a/common/utils/src/main/scala/org/apache/spark/internal/LogKey.scala +++ b/common/utils/src/main/scala/org/apache/spark/internal/LogKey.scala @@ -230,6 +230,7 @@ object LogKeys { case object FROM_TIME extends LogKey case object FUNCTION_NAME extends LogKey case object FUNCTION_PARAMETER extends LogKey + case object GLOBAL_INIT_FILE extends LogKey case object
GLOBAL_WATERMARK extends LogKey case object GROUP_BY_EXPRS extends LogKey case object GROUP_ID extends LogKey @@ -275,6 +276,7 @@ object LogKeys { case object KAFKA_RECORDS_PULLED_COUNT extends LogKey case object KEY extends LogKey case object KEYTAB extends LogKey + case object KEYTAB_FILE extends LogKey case object LABEL_COLUMN extends LogKey case object LARGEST_CLUSTER_INDEX extends LogKey case object LAST_ACCESS_TIME extends LogKey @@ -290,6 +292,7 @@ object LogKeys { case object LOADED_VERSION extends LogKey case object LOAD_FACTOR extends LogKey case object LOAD_TIME extends LogKey + case object LOCAL_SCRATCH_DIR extends LogKey case object LOCATION extends LogKey case object LOGICAL_PLAN_COLUMNS extends LogKey case object LOGICAL_PLAN_LEAVES extends LogKey @@ -411,6 +414,8 @@ object LogKeys { case object OLD_GENERATION_GC extends LogKey case object OLD_VALUE extends LogKey case object OPEN_COST_IN_BYTES extends LogKey + case object OPERATION_HANDLE extends LogKey + case object OPERATION_HANDLE_IDENTIFIER extends LogKey case object OPTIMIZED_PLAN_COLUMNS extends LogKey case object OPTIMIZER_CLASS_NAME extends LogKey case object OPTIONS extends LogKey @@ -458,6 +463,7 @@ object LogKeys { case object PROCESSING_TIME extends LogKey case object PRODUCER_ID extends LogKey case object PROPERTY_NAME extends LogKey + case object PROTOCOL_VERSION extends LogKey case object PROVIDER extends LogKey case object PUSHED_FILTERS extends LogKey case object PVC_METADATA_NAME extends LogKey @@ -523,9 +529,11 @@ object LogKeys { case object SERVER_NAME extends LogKey case object SERVICE_NAME extends LogKey case object SERVLET_CONTEXT_HANDLER_PATH extends LogKey + case object SESSION_HANDLE extends LogKey case object SESSION_HOLD_INFO extends LogKey case object SESSION_ID extends LogKey case object SESSION_KEY extends LogKey + case object SET_CLIENT_INFO_REQUEST extends LogKey case object SHARD_ID extends LogKey case object SHELL_COMMAND extends LogKey case object SHUFFLE_BLOCK_INFO extends LogKey @@ -578,6 +586,7 @@ object LogKeys { case object SUBSAMPLING_RATE extends LogKey case object SUB_QUERY extends LogKey case object TABLE_NAME extends LogKey + case object TABLE_TYPE extends LogKey case object TABLE_TYPES extends LogKey case object TARGET_NUM_EXECUTOR extends LogKey case object TARGET_NUM_EXECUTOR_DELTA extends LogKey @@ -595,6 +604,9 @@ object LogKeys { case object THREAD extends LogKey case object THREAD_ID extends LogKey case object THREAD_NAME extends LogKey + case object THREAD_POOL_KEEPALIVE_TIME extends LogKey + case object THREAD_POOL_SIZE extends LogKey + case object THREAD_POOL_WAIT_QUEUE_SIZE extends LogKey case object TID extends LogKey case object TIME extends LogKey case object TIMEOUT extends LogKey @@ -602,6 +614,7 @@ object LogKeys { case object TIMESTAMP extends LogKey case object TIME_UNITS extends LogKey case object TIP extends LogKey + case object TOKEN extends LogKey case object TOKEN_KIND extends LogKey case object TOKEN_REGEX extends LogKey case object TOPIC extends LogKey diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java index 6a74f64d44849..be7e682a3bdf5 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java @@ -19,16 +19,16 @@ import java.io.Closeable; import 
java.io.IOException; +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; import org.apache.spark.memory.MemoryConsumer; import org.apache.spark.memory.SparkOutOfMemoryError; import org.apache.spark.memory.TaskMemoryManager; import org.apache.spark.sql.types.*; import org.apache.spark.unsafe.memory.MemoryBlock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - /** * RowBasedKeyValueBatch stores key value pairs in contiguous memory region. * @@ -127,7 +127,8 @@ private boolean acquirePage(long requiredSize) { try { page = allocatePage(requiredSize); } catch (SparkOutOfMemoryError e) { - logger.warn("Failed to allocate page ({} bytes).", requiredSize); + logger.warn("Failed to allocate page ({} bytes).", + MDC.of(LogKeys.PAGE_SIZE$.MODULE$, requiredSize)); return false; } base = page.getBaseObject(); diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/util/CaseInsensitiveStringMap.java b/sql/catalyst/src/main/java/org/apache/spark/sql/util/CaseInsensitiveStringMap.java index 00a3de692fbf4..d66524d841ca6 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/util/CaseInsensitiveStringMap.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/util/CaseInsensitiveStringMap.java @@ -17,12 +17,6 @@ package org.apache.spark.sql.util; -import org.apache.spark.SparkIllegalArgumentException; -import org.apache.spark.SparkUnsupportedOperationException; -import org.apache.spark.annotation.Experimental; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -31,6 +25,14 @@ import java.util.Objects; import java.util.Set; +import org.apache.spark.annotation.Experimental; +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; +import org.apache.spark.SparkIllegalArgumentException; +import org.apache.spark.SparkUnsupportedOperationException; + /** * Case-insensitive map of string keys to string values. *

@@ -59,8 +61,8 @@ public CaseInsensitiveStringMap(Map originalMap) { for (Map.Entry entry : originalMap.entrySet()) { String key = toLowerCase(entry.getKey()); if (delegate.containsKey(key)) { - logger.warn("Converting duplicated key " + entry.getKey() + - " into CaseInsensitiveStringMap."); + logger.warn("Converting duplicated key {} into CaseInsensitiveStringMap.", + MDC.of(LogKeys.KEY$.MODULE$, entry.getKey())); } delegate.put(key, entry.getValue()); } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java index 6481cf15075a7..009b9f253ce0d 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java @@ -21,8 +21,11 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * AbstractService. @@ -85,7 +88,7 @@ public synchronized void init(HiveConf hiveConf) { ensureCurrentState(STATE.NOTINITED); this.hiveConf = hiveConf; changeState(STATE.INITED); - LOG.info("Service:" + getName() + " is inited."); + LOG.info("Service:{} is inited.", MDC.of(LogKeys.SERVICE_NAME$.MODULE$, getName())); } /** @@ -100,7 +103,7 @@ public synchronized void start() { startTime = System.currentTimeMillis(); ensureCurrentState(STATE.INITED); changeState(STATE.STARTED); - LOG.info("Service:" + getName() + " is started."); + LOG.info("Service:{} is started.", MDC.of(LogKeys.SERVICE_NAME$.MODULE$, getName())); } /** @@ -121,7 +124,7 @@ public synchronized void stop() { } ensureCurrentState(STATE.STARTED); changeState(STATE.STOPPED); - LOG.info("Service:" + getName() + " is stopped."); + LOG.info("Service:{} is stopped.", MDC.of(LogKeys.SERVICE_NAME$.MODULE$, getName())); } @Override diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java index 55c1aa52b95ca..ecd9de8154b31 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java @@ -23,8 +23,11 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * CompositeService. @@ -70,7 +73,7 @@ public synchronized void start() { } super.start(); } catch (Throwable e) { - LOG.error("Error starting services " + getName(), e); + LOG.error("Error starting services {}", e, MDC.of(LogKeys.SERVICE_NAME$.MODULE$, getName())); // Note that the state of the failed service is still INITED and not // STARTED. 
Even though the last service is not started completely, still // call stop() on all services including failed service to make sure cleanup @@ -100,7 +103,7 @@ private synchronized void stop(int numOfServicesStarted) { try { service.stop(); } catch (Throwable t) { - LOG.info("Error stopping " + service.getName(), t); + LOG.info("Error stopping {}", t, MDC.of(LogKeys.SERVICE_NAME$.MODULE$, service.getName())); } } } @@ -123,7 +126,8 @@ public void run() { // Stop the Composite Service compositeService.stop(); } catch (Throwable t) { - LOG.info("Error stopping " + compositeService.getName(), t); + LOG.info("Error stopping {}", t, + MDC.of(LogKeys.SERVICE_NAME$.MODULE$, compositeService.getName())); } } } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java index 4b8d2cb1536cd..25e0316d5e9c3 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java @@ -21,8 +21,9 @@ import java.security.NoSuchAlgorithmException; import org.apache.commons.codec.binary.Base64; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; /** * The cookie signer generates a signature based on SHA digest diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java index 434676aa8d215..d947f01681bea 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java @@ -18,8 +18,11 @@ package org.apache.hive.service; import org.apache.hadoop.hive.conf.HiveConf; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * ServiceOperations. 
@@ -129,9 +132,8 @@ public static Exception stopQuietly(Service service) { try { stop(service); } catch (Exception e) { - LOG.warn("When stopping the service " + service.getName() - + " : " + e, - e); + LOG.warn("When stopping the service {}", e, + MDC.of(LogKeys.SERVICE_NAME$.MODULE$, service.getName())); return e; } return null; diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java index 7552bda57dc0b..82ef4b9f9ce70 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java @@ -18,7 +18,7 @@ import java.io.IOException; -import org.slf4j.Logger; +import org.apache.spark.internal.Logger; public class ServiceUtils { diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java index c48f4e3ec7b09..b570e88e2bc5b 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java @@ -42,16 +42,19 @@ import org.apache.thrift.TProcessorFactory; import org.apache.thrift.transport.TTransportException; import org.apache.thrift.transport.TTransportFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * This class helps in some aspects of authentication. It creates the proper Thrift classes for the * given configuration as well as helps with authenticating requests. */ public class HiveAuthFactory { - private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class); + private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class); public enum AuthTypes { NOSASL("NOSASL"), @@ -285,9 +288,9 @@ public String verifyDelegationToken(String delegationToken) throws HiveSQLExcept try { return delegationTokenManager.verifyDelegationToken(delegationToken); } catch (IOException e) { - String msg = "Error verifying delegation token " + delegationToken; - LOG.error(msg, e); - throw new HiveSQLException(msg, "08S01", e); + String msg = "Error verifying delegation token"; + LOG.error(msg + " {}", e, MDC.of(LogKeys.TOKEN$.MODULE$, delegationToken)); + throw new HiveSQLException(msg + delegationToken, "08S01", e); } } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java index 08a8258db06f2..0bfe361104dea 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java @@ -39,8 +39,11 @@ import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; import org.ietf.jgss.Oid; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * Utility functions for HTTP mode authentication. 
@@ -109,7 +112,8 @@ public static String getUserNameFromCookieToken(String tokenStr) { Map map = splitCookieToken(tokenStr); if (!map.keySet().equals(COOKIE_ATTRIBUTES)) { - LOG.error("Invalid token with missing attributes " + tokenStr); + LOG.error("Invalid token with missing attributes {}", + MDC.of(LogKeys.TOKEN$.MODULE$, tokenStr)); return null; } return map.get(COOKIE_CLIENT_USER_NAME); @@ -129,7 +133,7 @@ private static Map splitCookieToken(String tokenStr) { String part = st.nextToken(); int separator = part.indexOf(COOKIE_KEY_VALUE_SEPARATOR); if (separator == -1) { - LOG.error("Invalid token string " + tokenStr); + LOG.error("Invalid token string {}", MDC.of(LogKeys.TOKEN$.MODULE$, tokenStr)); return null; } String key = part.substring(0, separator); diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java index 1205d21be6be6..8e7d8e60c176b 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java @@ -25,8 +25,9 @@ import org.apache.thrift.transport.TSaslServerTransport; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; /** * This class is responsible for setting the ipAddress for operations executed via HiveServer2. @@ -38,7 +39,7 @@ */ public class TSetIpAddressProcessor extends TCLIService.Processor { - private static final Logger LOGGER = LoggerFactory.getLogger(TSetIpAddressProcessor.class.getName()); + private static final Logger LOGGER = LoggerFactory.getLogger(TSetIpAddressProcessor.class); public TSetIpAddressProcessor(Iface iface) { super(iface); diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java index caccb0c4b76f7..e612b34d7bdf7 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java @@ -49,8 +49,11 @@ import org.apache.hive.service.rpc.thrift.TRowSet; import org.apache.hive.service.rpc.thrift.TTableSchema; import org.apache.hive.service.server.HiveServer2; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * CLIService. 
@@ -99,8 +102,9 @@ public synchronized void init(HiveConf hiveConf) { String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL); String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB); if (principal.isEmpty() || keyTabFile.isEmpty()) { - LOG.info("SPNego httpUGI not created, spNegoPrincipal: " + principal + - ", ketabFile: " + keyTabFile); + LOG.info("SPNego httpUGI not created, spNegoPrincipal: {}, keytabFile: {}", + MDC.of(LogKeys.PRINCIPAL$.MODULE$, principal), + MDC.of(LogKeys.KEYTAB_FILE$.MODULE$, keyTabFile)); } else { try { this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf); @@ -457,7 +461,8 @@ public OperationStatus getOperationStatus(OperationHandle opHandle) LOG.trace(opHandle + ": The background operation was cancelled", e); } catch (ExecutionException e) { // The background operation thread was aborted - LOG.warn(opHandle + ": The background operation was aborted", e); + LOG.warn("{}: The background operation was aborted", e, + MDC.of(LogKeys.OPERATION_HANDLE$.MODULE$, opHandle)); } catch (InterruptedException e) { // No op, this thread was interrupted // In this case, the call might return sooner than long polling timeout @@ -551,7 +556,7 @@ public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory au String owner, String renewer) throws HiveSQLException { String delegationToken = sessionManager.getSession(sessionHandle) .getDelegationToken(authFactory, owner, renewer); - LOG.info(sessionHandle + ": getDelegationToken()"); + LOG.info("{}: getDelegationToken()", MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, sessionHandle)); return delegationToken; } @@ -559,14 +564,14 @@ public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory au public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException { sessionManager.getSession(sessionHandle).cancelDelegationToken(authFactory, tokenStr); - LOG.info(sessionHandle + ": cancelDelegationToken()"); + LOG.info("{}: cancelDelegationToken()", MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, sessionHandle)); } @Override public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException { sessionManager.getSession(sessionHandle).renewDelegationToken(authFactory, tokenStr); - LOG.info(sessionHandle + ": renewDelegationToken()"); + LOG.info("{}: renewDelegationToken()", MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, sessionHandle)); } @Override diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java index 629d9abdac2c0..f6a269e99251d 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java @@ -30,8 +30,11 @@ import org.apache.thrift.protocol.TCompactProtocol; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.transport.TIOStreamTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * ColumnBasedSet. 
@@ -68,7 +71,7 @@ public ColumnBasedSet(TRowSet tRowSet) throws TException { try { tvalue.read(protocol); } catch (TException e) { - LOG.error(e.getMessage(), e); + LOG.error("{}", e, MDC.of(LogKeys.ERROR$.MODULE$, e.getMessage())); throw new TException("Error reading column value from the row set blob", e); } columns.add(new ColumnBuffer(tvalue)); diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java index 96c16beac7c4d..3876632211715 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java @@ -28,8 +28,11 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Multimap; import org.apache.hadoop.hive.metastore.TableType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * ClassicTableTypeMapping. @@ -69,7 +72,8 @@ public ClassicTableTypeMapping() { public String[] mapToHiveType(String clientTypeName) { Collection hiveTableType = clientToHiveMap.get(clientTypeName.toUpperCase()); if (hiveTableType == null) { - LOG.warn("Not supported client table type " + clientTypeName); + LOG.warn("Not supported client table type {}", + MDC.of(LogKeys.TABLE_TYPE$.MODULE$, clientTypeName)); return new String[] {clientTypeName}; } return Iterables.toArray(hiveTableType, String.class); @@ -79,7 +83,8 @@ public String[] mapToHiveType(String clientTypeName) { public String mapToClientType(String hiveTypeName) { String clientTypeName = hiveToClientMap.get(hiveTypeName); if (clientTypeName == null) { - LOG.warn("Invalid hive table type " + hiveTypeName); + LOG.warn("Invalid hive table type {}", + MDC.of(LogKeys.TABLE_TYPE$.MODULE$, hiveTypeName)); return hiveTypeName; } return clientTypeName; diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java index ad42925207d69..135420508e21e 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java @@ -38,15 +38,18 @@ import org.apache.hive.service.rpc.thrift.TProtocolVersion; import org.apache.hive.service.rpc.thrift.TRowSet; import org.apache.hive.service.rpc.thrift.TTableSchema; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; public abstract class Operation { protected final HiveSession parentSession; private OperationState state = OperationState.INITIALIZED; private final OperationHandle opHandle; private HiveConf configuration; - public static final Logger LOG = LoggerFactory.getLogger(Operation.class.getName()); + public static final Logger LOG = LoggerFactory.getLogger(Operation.class); public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT; public static final long DEFAULT_FETCH_MAX_ROWS = 100; protected boolean hasResultSet; @@ -208,8 
+211,8 @@ protected void createOperationLog() { // create log file try { if (operationLogFile.exists()) { - LOG.warn("The operation log file should not exist, but it is already there: " + - operationLogFile.getAbsolutePath()); + LOG.warn("The operation log file should not exist, but it is already there: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogFile.getAbsolutePath())); operationLogFile.delete(); } if (!operationLogFile.createNewFile()) { @@ -217,13 +220,15 @@ protected void createOperationLog() { // If it can be read/written, keep its contents and use it. if (!operationLogFile.canRead() || !operationLogFile.canWrite()) { LOG.warn("The already existed operation log file cannot be recreated, " + - "and it cannot be read or written: " + operationLogFile.getAbsolutePath()); + "and it cannot be read or written: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogFile.getAbsolutePath())); isOperationLogEnabled = false; return; } } } catch (Exception e) { - LOG.warn("Unable to create operation log file: " + operationLogFile.getAbsolutePath(), e); + LOG.warn("Unable to create operation log file: {}", e, + MDC.of(LogKeys.PATH$.MODULE$, operationLogFile.getAbsolutePath())); isOperationLogEnabled = false; return; } @@ -232,8 +237,8 @@ protected void createOperationLog() { try { operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); } catch (FileNotFoundException e) { - LOG.warn("Unable to instantiate OperationLog object for operation: " + - opHandle, e); + LOG.warn("Unable to instantiate OperationLog object for operation: {}", e, + MDC.of(LogKeys.OPERATION_HANDLE$.MODULE$, opHandle)); isOperationLogEnabled = false; return; } @@ -283,8 +288,9 @@ public void run() throws HiveSQLException { protected void cleanupOperationLog() { if (isOperationLogEnabled) { if (operationLog == null) { - LOG.error("Operation [ " + opHandle.getHandleIdentifier() + " ] " - + "logging is enabled, but its OperationLog object cannot be found."); + LOG.error("Operation [ {} ] logging is enabled, " + + "but its OperationLog object cannot be found.", + MDC.of(LogKeys.OPERATION_HANDLE_IDENTIFIER$.MODULE$, opHandle.getHandleIdentifier())); } else { operationLog.close(); } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java index bb68c840496ad..1498cb4907f1f 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java @@ -40,8 +40,11 @@ import org.apache.hive.service.rpc.thrift.TRowSet; import org.apache.hive.service.rpc.thrift.TTableSchema; import org.apache.logging.log4j.core.Appender; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * OperationManager. 
@@ -289,7 +292,8 @@ public List removeExpiredOperations(OperationHandle[] handles) { for (OperationHandle handle : handles) { Operation operation = removeTimedOutOperation(handle); if (operation != null) { - LOG.warn("Operation " + handle + " is timed-out and will be closed"); + LOG.warn("Operation {} is timed-out and will be closed", + MDC.of(LogKeys.OPERATION_HANDLE$.MODULE$, handle)); removed.add(operation); } } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index e00d2705d4172..e073fa4713bfb 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -69,8 +69,11 @@ import org.apache.hive.service.rpc.thrift.TRowSet; import org.apache.hive.service.rpc.thrift.TTableSchema; import org.apache.hive.service.server.ThreadWithGarbageCleanup; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; import static org.apache.hadoop.hive.conf.SystemVariables.ENV_PREFIX; import static org.apache.hadoop.hive.conf.SystemVariables.HIVECONF_PREFIX; @@ -116,7 +119,7 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo ShimLoader.getHadoopShims().refreshDefaultQueue(hiveConf, username); } } catch (IOException e) { - LOG.warn("Error setting scheduler queue: " + e, e); + LOG.warn("Error setting scheduler queue: ", e); } // Set an explicit session name to control the download directory name hiveConf.set("hive.session.id", @@ -146,8 +149,8 @@ public void open(Map sessionConfMap) throws HiveSQLException { sessionState.loadAuxJars(); sessionState.loadReloadableAuxJars(); } catch (IOException e) { - String msg = "Failed to load reloadable jar file path: " + e; - LOG.error(msg, e); + String msg = "Failed to load reloadable jar file path."; + LOG.error("{}", e, MDC.of(LogKeys.ERROR$.MODULE$, msg)); throw new HiveSQLException(msg, e); } // Process global init file: .hiverc @@ -197,7 +200,8 @@ private void processGlobalInitFile() { hivercFile = new File(hivercFile, SessionManager.HIVERCFILE); } if (hivercFile.isFile()) { - LOG.info("Running global init file: " + hivercFile); + LOG.info("Running global init file: {}", + MDC.of(LogKeys.GLOBAL_INIT_FILE$.MODULE$, hivercFile)); int rc = processor.processFile(hivercFile.getAbsolutePath()); if (rc != 0) { LOG.error("Failed on initializing global .hiverc file"); @@ -297,28 +301,29 @@ private static void setConf(String varname, String key, String varvalue, boolean @Override public void setOperationLogSessionDir(File operationLogRootDir) { if (!operationLogRootDir.exists()) { - LOG.warn("The operation log root directory is removed, recreating: " + - operationLogRootDir.getAbsolutePath()); + LOG.warn("The operation log root directory is removed, recreating: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); if (!operationLogRootDir.mkdirs()) { - LOG.warn("Unable to create operation log root directory: " + - operationLogRootDir.getAbsolutePath()); + LOG.warn("Unable to create operation log root directory: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); } } if (!operationLogRootDir.canWrite()) { - LOG.warn("The operation 
log root directory is not writable: " + - operationLogRootDir.getAbsolutePath()); + LOG.warn("The operation log root directory is not writable: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); } sessionLogDir = new File(operationLogRootDir, sessionHandle.getHandleIdentifier().toString()); isOperationLogEnabled = true; if (!sessionLogDir.exists()) { if (!sessionLogDir.mkdir()) { - LOG.warn("Unable to create operation log session directory: " + - sessionLogDir.getAbsolutePath()); + LOG.warn("Unable to create operation log session directory: {}", + MDC.of(LogKeys.PATH$.MODULE$, sessionLogDir.getAbsolutePath())); isOperationLogEnabled = false; } } if (isOperationLogEnabled) { - LOG.info("Operation log session directory is created: " + sessionLogDir.getAbsolutePath()); + LOG.info("Operation log session directory is created: {}", + MDC.of(LogKeys.PATH$.MODULE$, sessionLogDir.getAbsolutePath())); } } @@ -653,7 +658,8 @@ public void close() throws HiveSQLException { try { operationManager.closeOperation(opHandle); } catch (Exception e) { - LOG.warn("Exception is thrown closing operation " + opHandle, e); + LOG.warn("Exception is thrown closing operation {}", e, + MDC.of(LogKeys.OPERATION_HANDLE$.MODULE$, opHandle)); } } opHandleSet.clear(); @@ -693,13 +699,14 @@ private void cleanupPipeoutFile() { (dir, name) -> name.startsWith(sessionID) && name.endsWith(".pipeout")); if (fileAry == null) { - LOG.error("Unable to access pipeout files in " + lScratchDir); + LOG.error("Unable to access pipeout files in {}", + MDC.of(LogKeys.LOCAL_SCRATCH_DIR$.MODULE$, lScratchDir)); } else { for (File file : fileAry) { try { FileUtils.forceDelete(file); } catch (Exception e) { - LOG.error("Failed to cleanup pipeout file: " + file, e); + LOG.error("Failed to cleanup pipeout file: {}", e, MDC.of(LogKeys.PATH$.MODULE$, file)); } } } @@ -710,7 +717,8 @@ private void cleanupSessionLogDir() { try { FileUtils.forceDelete(sessionLogDir); } catch (Exception e) { - LOG.error("Failed to cleanup session log dir: " + sessionHandle, e); + LOG.error("Failed to cleanup session log dir: {}", e, + MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, sessionHandle)); } } } @@ -759,7 +767,8 @@ private void closeTimedOutOperations(List operations) { try { operation.close(); } catch (Exception e) { - LOG.warn("Exception is thrown closing timed-out operation " + operation.getHandle(), e); + LOG.warn("Exception is thrown closing timed-out operation {}", e, + MDC.of(LogKeys.OPERATION_HANDLE$.MODULE$, operation.getHandle())); } } } finally { diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java index fa342feacc7f4..6c282b679ca8c 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java @@ -38,8 +38,11 @@ import org.apache.hive.service.rpc.thrift.TProtocolVersion; import org.apache.hive.service.server.HiveServer2; import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * SessionManager. 
@@ -84,13 +87,15 @@ public synchronized void init(HiveConf hiveConf) { private void createBackgroundOperationPool() { int poolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS); - LOG.info("HiveServer2: Background operation thread pool size: " + poolSize); + LOG.info("HiveServer2: Background operation thread pool size: {}", + MDC.of(LogKeys.THREAD_POOL_SIZE$.MODULE$, poolSize)); int poolQueueSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE); - LOG.info("HiveServer2: Background operation thread wait queue size: " + poolQueueSize); + LOG.info("HiveServer2: Background operation thread wait queue size: {}", + MDC.of(LogKeys.THREAD_POOL_WAIT_QUEUE_SIZE$.MODULE$, poolQueueSize)); long keepAliveTime = HiveConf.getTimeVar( hiveConf, ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME, TimeUnit.SECONDS); - LOG.info( - "HiveServer2: Background operation thread keepalive time: " + keepAliveTime + " seconds"); + LOG.info("HiveServer2: Background operation thread keepalive time: {} ms", + MDC.of(LogKeys.THREAD_POOL_KEEPALIVE_TIME$.MODULE$, keepAliveTime * 1000L)); // Create a thread pool with #poolSize threads // Threads terminate when they are idle for more than the keepAliveTime @@ -115,26 +120,27 @@ private void initOperationLogRootDir() { isOperationLogEnabled = true; if (operationLogRootDir.exists() && !operationLogRootDir.isDirectory()) { - LOG.warn("The operation log root directory exists, but it is not a directory: " + - operationLogRootDir.getAbsolutePath()); + LOG.warn("The operation log root directory exists, but it is not a directory: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); isOperationLogEnabled = false; } if (!operationLogRootDir.exists()) { if (!operationLogRootDir.mkdirs()) { - LOG.warn("Unable to create operation log root directory: " + - operationLogRootDir.getAbsolutePath()); + LOG.warn("Unable to create operation log root directory: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); isOperationLogEnabled = false; } } if (isOperationLogEnabled) { - LOG.info("Operation log root directory is created: " + operationLogRootDir.getAbsolutePath()); + LOG.info("Operation log root directory is created: {}", + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); try { FileUtils.forceDeleteOnExit(operationLogRootDir); } catch (IOException e) { - LOG.warn("Failed to schedule cleanup HS2 operation logging root dir: " + - operationLogRootDir.getAbsolutePath(), e); + LOG.warn("Failed to schedule cleanup HS2 operation logging root dir: {}", e, + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); } } } @@ -164,12 +170,14 @@ public void run() { if (sessionTimeout > 0 && session.getLastAccessTime() + sessionTimeout <= current && (!checkOperation || session.getNoOperationTime() > sessionTimeout)) { SessionHandle handle = session.getSessionHandle(); - LOG.warn("Session " + handle + " is Timed-out (last access : " + - new Date(session.getLastAccessTime()) + ") and will be closed"); + LOG.warn("Session {} is Timed-out (last access : {}) and will be closed", + MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, handle), + MDC.of(LogKeys.LAST_ACCESS_TIME$.MODULE$, new Date(session.getLastAccessTime()))); try { closeSession(handle); } catch (HiveSQLException e) { - LOG.warn("Exception is thrown closing session " + handle, e); + LOG.warn("Exception is thrown closing session {}", e, + MDC.of(LogKeys.SESSION_HANDLE$.MODULE$, handle)); } } else { session.closeExpiredOperations(); @@ 
-210,8 +218,9 @@ public synchronized void stop() { try { backgroundOperationPool.awaitTermination(timeout, TimeUnit.SECONDS); } catch (InterruptedException e) { - LOG.warn("HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT = " + timeout + - " seconds has been exceeded. RUNNING background operations will be shut down", e); + LOG.warn("HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT = {} ms has been exceeded. " + + "RUNNING background operations will be shut down", e, + MDC.of(LogKeys.TIMEOUT$.MODULE$, timeout * 1000)); } backgroundOperationPool = null; } @@ -223,8 +232,8 @@ private void cleanupLoggingRootDir() { try { FileUtils.forceDelete(operationLogRootDir); } catch (Exception e) { - LOG.warn("Failed to cleanup root dir of HS2 logging: " + operationLogRootDir - .getAbsolutePath(), e); + LOG.warn("Failed to cleanup root dir of HS2 logging: {}", e, + MDC.of(LogKeys.PATH$.MODULE$, operationLogRootDir.getAbsolutePath())); } } } diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index 4b18e2950a3de..752cd54af626b 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -41,8 +41,11 @@ import org.apache.thrift.server.ServerContext; import org.apache.thrift.server.TServerEventHandler; import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * ThriftCLIService. 
@@ -50,7 +53,7 @@ */ public abstract class ThriftCLIService extends AbstractService implements TCLIService.Iface, Runnable { - public static final Logger LOG = LoggerFactory.getLogger(ThriftCLIService.class.getName()); + public static final Logger LOG = LoggerFactory.getLogger(ThriftCLIService.class); protected CLIService cliService; private static final TStatus OK_STATUS = new TStatus(TStatusCode.SUCCESS_STATUS); @@ -106,7 +109,7 @@ public void deleteContext(ServerContext serverContext, try { cliService.closeSession(sessionHandle); } catch (HiveSQLException e) { - LOG.warn("Failed to close session: " + e, e); + LOG.warn("Failed to close session: ", e); } } } @@ -236,7 +239,8 @@ private TStatus notSupportTokenErrorStatus() { @Override public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException { - LOG.info("Client protocol version: " + req.getClient_protocol()); + LOG.info("Client protocol version: {}", + MDC.of(LogKeys.PROTOCOL_VERSION$.MODULE$, req.getClient_protocol())); TOpenSessionResp resp = new TOpenSessionResp(); try { SessionHandle sessionHandle = getSessionHandle(req, resp); @@ -272,7 +276,7 @@ public TSetClientInfoResp SetClientInfo(TSetClientInfoReq req) throws TException sb.append(e.getKey()).append(" = ").append(e.getValue()); } if (sb != null) { - LOG.info("{}", sb); + LOG.info("{}", MDC.of(LogKeys.SET_CLIENT_INFO_REQUEST$.MODULE$, sb)); } } return new TSetClientInfoResp(OK_STATUS); diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java index b0bede741cb19..b423038fe2b61 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java @@ -55,8 +55,11 @@ import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; import org.ietf.jgss.Oid; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; /** * @@ -66,7 +69,7 @@ public class ThriftHttpServlet extends TServlet { private static final long serialVersionUID = 1L; - public static final Logger LOG = LoggerFactory.getLogger(ThriftHttpServlet.class.getName()); + public static final Logger LOG = LoggerFactory.getLogger(ThriftHttpServlet.class); private final String authType; private final UserGroupInformation serviceUGI; private final UserGroupInformation httpUGI; @@ -174,7 +177,8 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response) } else { response.addCookie(hs2Cookie); } - LOG.info("Cookie added for clientUserName " + clientUserName); + LOG.info("Cookie added for clientUserName {}", + MDC.of(LogKeys.USER_NAME$.MODULE$, clientUserName)); } super.doPost(request, response); } @@ -228,7 +232,7 @@ private String getClientNameFromCookie(Cookie[] cookies) { String userName = HttpAuthUtils.getUserNameFromCookieToken(currValue); if (userName == null) { - LOG.warn("Invalid cookie token " + currValue); + LOG.warn("Invalid cookie token {}", MDC.of(LogKeys.TOKEN$.MODULE$, currValue)); continue; } //We have found a valid cookie in the client request. 
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java index ad5ca51b9e63d..b6c9b937c5f32 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java @@ -36,9 +36,11 @@ import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService; import org.apache.hive.service.cli.thrift.ThriftCLIService; import org.apache.hive.service.cli.thrift.ThriftHttpCLIService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; +import org.apache.spark.internal.LogKeys; +import org.apache.spark.internal.MDC; import org.apache.spark.util.ShutdownHookManager; import org.apache.spark.util.SparkExitCode; @@ -142,8 +144,8 @@ private static void startHiveServer2() throws Throwable { if (++attempts >= maxAttempts) { throw new Error("Max start attempts " + maxAttempts + " exhausted", throwable); } else { - LOG.warn("Error starting HiveServer2 on attempt " + attempts - + ", will retry in 60 seconds", throwable); + LOG.warn("Error starting HiveServer2 on attempt {}, will retry in 60 seconds", + throwable, MDC.of(LogKeys.RETRY_COUNT$.MODULE$, attempts)); try { Thread.sleep(60L * 1000L); } catch (InterruptedException e) { @@ -159,7 +161,7 @@ public static void main(String[] args) { ServerOptionsProcessor oproc = new ServerOptionsProcessor("hiveserver2"); ServerOptionsProcessorResponse oprocResponse = oproc.parse(args); - HiveStringUtils.startupShutdownMessage(HiveServer2.class, args, LOG); + HiveStringUtils.startupShutdownMessage(HiveServer2.class, args, LOG.getSlf4jLogger()); // Call the executor which will execute the appropriate command based on the parsed options oprocResponse.getServerOptionsExecutor().execute(); diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java index afaa1403bfdcd..23957e146ddf1 100644 --- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java @@ -22,8 +22,9 @@ import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.RawStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.apache.spark.internal.Logger; +import org.apache.spark.internal.LoggerFactory; /** * A HiveServer2 thread used to construct new server threads. 
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala index 7262bc22dc429..bf1c4978431b7 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala @@ -33,8 +33,8 @@ import org.apache.hive.service.Service.STATE import org.apache.hive.service.auth.HiveAuthFactory import org.apache.hive.service.cli._ import org.apache.hive.service.server.HiveServer2 -import org.slf4j.Logger +import org.apache.spark.internal.Logger import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.util.SQLKeywordUtils import org.apache.spark.sql.errors.QueryExecutionErrors
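
---

The following is an illustrative sketch (not part of the patch) of the before/after pattern applied throughout the Java files above. The class `MyService` and the logged values are hypothetical; the `Logger`, `LoggerFactory`, `LogKeys`, and `MDC` usage mirrors the call sites visible in the hunks, where each interpolated variable is wrapped in `MDC.of(LogKeys.<KEY>$.MODULE$, value)` so it is emitted as a structured field.

```java
// Illustrative sketch only, not part of the patch: a hypothetical class showing
// the migration pattern. Before: slf4j logger + string concatenation.
// After: org.apache.spark.internal.Logger with MDC-tagged variables.
import org.apache.spark.internal.LogKeys;
import org.apache.spark.internal.Logger;
import org.apache.spark.internal.LoggerFactory;
import org.apache.spark.internal.MDC;

public class MyService {

  private static final Logger LOG = LoggerFactory.getLogger(MyService.class);

  public void start(String serviceName) {
    // Before: LOG.info("Service:" + serviceName + " is started.");
    // After: the variable is passed via MDC.of with an explicit LogKey, so the
    // value is captured as a structured field as well as rendered into "{}".
    LOG.info("Service:{} is started.", MDC.of(LogKeys.SERVICE_NAME$.MODULE$, serviceName));
  }

  public void stopQuietly(String serviceName, Exception cause) {
    // Warn/error overloads take the Throwable before the MDC arguments,
    // matching the CompositeService and ServiceOperations hunks above.
    LOG.warn("When stopping the service {}", cause,
      MDC.of(LogKeys.SERVICE_NAME$.MODULE$, serviceName));
  }
}
```

Where an external API still requires an `org.slf4j.Logger` (for example `HiveStringUtils.startupShutdownMessage` in `HiveServer2`), the new `getSlf4jLogger()` accessor added to `Logger.java` is used instead of importing slf4j directly; the new `LogKeys` case objects added in `LogKey.scala` (for example `GLOBAL_INIT_FILE`, `KEYTAB_FILE`, `THREAD_POOL_SIZE`) supply the keys referenced from the Java call sites.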