diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
index 9f99bf5011569..e4945a28d9e14 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
@@ -21,7 +21,7 @@ import java.nio.charset.StandardCharsets
 import java.sql.{Date, Timestamp}
 import java.time.{Instant, LocalDate, ZoneOffset}
 
-import org.apache.spark.sql.Row
+import org.apache.spark.sql.{Dataset, Row}
 import org.apache.spark.sql.catalyst.util.{DateFormatter, DateTimeUtils, LegacyDateFormats, TimestampFormatter}
 import org.apache.spark.sql.execution.command.{DescribeCommandBase, ExecutedCommandExec, ShowTablesCommand, ShowViewsCommand}
 import org.apache.spark.sql.execution.datasources.v2.{DescribeTableExec, ShowTablesExec}
@@ -37,30 +37,45 @@ object HiveResult {
   * Returns the result as a hive compatible sequence of strings. This is used in tests and
   * `SparkSQLDriver` for CLI applications.
   */
-  def hiveResultString(executedPlan: SparkPlan): Seq[String] = executedPlan match {
-    case ExecutedCommandExec(_: DescribeCommandBase) =>
-      formatDescribeTableOutput(executedPlan.executeCollectPublic())
-    case _: DescribeTableExec =>
-      formatDescribeTableOutput(executedPlan.executeCollectPublic())
-    // SHOW TABLES in Hive only output table names while our v1 command outputs
-    // database, table name, isTemp.
-    case command @ ExecutedCommandExec(s: ShowTablesCommand) if !s.isExtended =>
-      command.executeCollect().map(_.getString(1))
-    // SHOW TABLES in Hive only output table names while our v2 command outputs
-    // namespace and table name.
-    case command : ShowTablesExec =>
-      command.executeCollect().map(_.getString(1))
-    // SHOW VIEWS in Hive only outputs view names while our v1 command outputs
-    // namespace, viewName, and isTemporary.
-    case command @ ExecutedCommandExec(_: ShowViewsCommand) =>
-      command.executeCollect().map(_.getString(1))
-    case other =>
-      val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
-      // We need the types so we can output struct field names
-      val types = executedPlan.output.map(_.dataType)
-      // Reformat to match hive tab delimited output.
-      result.map(_.zip(types).map(e => toHiveString(e)))
-        .map(_.mkString("\t"))
+  def hiveResultString(ds: Dataset[_]): Seq[String] = {
+    val executedPlan = ds.queryExecution.executedPlan
+    executedPlan match {
+      case ExecutedCommandExec(_: DescribeCommandBase) =>
+        formatDescribeTableOutput(executedPlan.executeCollectPublic())
+      case _: DescribeTableExec =>
+        formatDescribeTableOutput(executedPlan.executeCollectPublic())
+      // SHOW TABLES in Hive only output table names while our v1 command outputs
+      // database, table name, isTemp.
+      case command @ ExecutedCommandExec(s: ShowTablesCommand) if !s.isExtended =>
+        command.executeCollect().map(_.getString(1))
+      // SHOW TABLES in Hive only output table names while our v2 command outputs
+      // namespace and table name.
+      case command : ShowTablesExec =>
+        command.executeCollect().map(_.getString(1))
+      // SHOW VIEWS in Hive only outputs view names while our v1 command outputs
+      // namespace, viewName, and isTemporary.
+      case command @ ExecutedCommandExec(_: ShowViewsCommand) =>
+        command.executeCollect().map(_.getString(1))
+      case _ =>
+        val sessionWithJava8DatetimeEnabled = {
+          val cloned = ds.sparkSession.cloneSession()
+          cloned.conf.set(SQLConf.DATETIME_JAVA8API_ENABLED.key, true)
+          cloned
+        }
+        sessionWithJava8DatetimeEnabled.withActive {
+          // We cannot collect the original dataset because its encoders could be created
+          // with disabled Java 8 date-time API.
+          val result: Seq[Seq[Any]] = Dataset.ofRows(ds.sparkSession, ds.logicalPlan)
+            .queryExecution
+            .executedPlan
+            .executeCollectPublic().map(_.toSeq).toSeq
+          // We need the types so we can output struct field names
+          val types = executedPlan.output.map(_.dataType)
+          // Reformat to match hive tab delimited output.
+          result.map(_.zip(types).map(e => toHiveString(e)))
+            .map(_.mkString("\t"))
+        }
+    }
   }
 
   private def formatDescribeTableOutput(rows: Array[Row]): Seq[String] = {
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out
index 151fa1e28d725..1d862ba8a41a8 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out
@@ -584,7 +584,7 @@ select make_date(-44, 3, 15)
 -- !query schema
 struct<make_date(-44, 3, 15):date>
 -- !query output
-0045-03-15
+-0044-03-15
 
 
 -- !query
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala
index 92da58c27a141..f25aa1d9594ff 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala
@@ -515,7 +515,7 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession {
     val schema = df.schema.catalogString
     // Get answer, but also get rid of the #1234 expression ids that show up in explain plans
     val answer = SQLExecution.withNewExecutionId(df.queryExecution, Some(sql)) {
-      hiveResultString(df.queryExecution.executedPlan).map(replaceNotIncludedMsg)
+      hiveResultString(df).map(replaceNotIncludedMsg)
     }
 
     // If the output is not pre-sorted, sort it.
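Note: with the new signature above, callers hand over the Dataset itself and HiveResult derives the executed plan internally, re-collecting non-command plans on a cloned session with the Java 8 date-time API enabled. A minimal usage sketch, not part of this patch (the local session and the query are illustrative assumptions):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.HiveResult

val spark = SparkSession.builder().master("local[1]").getOrCreate()  // hypothetical session for the sketch
val df = spark.sql("SELECT make_date(-44, 3, 15)")
// Each element is one row, tab-delimited, formatted the way Hive would print it.
val lines: Seq[String] = HiveResult.hiveResultString(df)
lines.foreach(println)  // expected to print -0044-03-15, matching the golden-file change above

Because the collection now happens with spark.sql.datetime.java8API.enabled set to true, dates are rendered through the Proleptic Gregorian formatter, which appears to be why the make_date(-44, 3, 15) golden result flips from 0045-03-15 to -0044-03-15.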
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala
index a0b212d2cf6fd..a040b22d22786 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.execution
 
-import org.apache.spark.sql.catalyst.util.DateTimeTestUtils
+import org.apache.spark.sql.catalyst.util.DateTimeTestUtils._
 import org.apache.spark.sql.connector.InMemoryTableCatalog
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSparkSession}
@@ -25,34 +25,45 @@ import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSparkSess
 class HiveResultSuite extends SharedSparkSession {
   import testImplicits._
 
-  test("date formatting in hive result") {
-    DateTimeTestUtils.outstandingTimezonesIds.foreach { zoneId =>
-      withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> zoneId) {
-        val dates = Seq("2018-12-28", "1582-10-03", "1582-10-04", "1582-10-15")
-        val df = dates.toDF("a").selectExpr("cast(a as date) as b")
-        val executedPlan1 = df.queryExecution.executedPlan
-        val result = HiveResult.hiveResultString(executedPlan1)
-        assert(result == dates)
-        val executedPlan2 = df.selectExpr("array(b)").queryExecution.executedPlan
-        val result2 = HiveResult.hiveResultString(executedPlan2)
-        assert(result2 == dates.map(x => s"[$x]"))
+  private def withOutstandingZoneIds(f: => Unit): Unit = {
+    for {
+      jvmZoneId <- outstandingZoneIds
+      sessionZoneId <- outstandingZoneIds
+    } {
+      withDefaultTimeZone(jvmZoneId) {
+        withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> sessionZoneId.getId) {
+          f
+        }
       }
     }
   }
 
+  test("date formatting in hive result") {
+    withOutstandingZoneIds {
+      val dates = Seq("2018-12-28", "1582-10-03", "1582-10-04", "1582-10-15")
+      val df = dates.toDF("a").selectExpr("cast(a as date) as b")
+      val result = HiveResult.hiveResultString(df)
+      assert(result == dates)
+      val df2 = df.selectExpr("array(b)")
+      val result2 = HiveResult.hiveResultString(df2)
+      assert(result2 == dates.map(x => s"[$x]"))
+    }
+  }
+
   test("timestamp formatting in hive result") {
-    val timestamps = Seq(
-      "2018-12-28 01:02:03",
-      "1582-10-03 01:02:03",
-      "1582-10-04 01:02:03",
-      "1582-10-15 01:02:03")
-    val df = timestamps.toDF("a").selectExpr("cast(a as timestamp) as b")
-    val executedPlan1 = df.queryExecution.executedPlan
-    val result = HiveResult.hiveResultString(executedPlan1)
-    assert(result == timestamps)
-    val executedPlan2 = df.selectExpr("array(b)").queryExecution.executedPlan
-    val result2 = HiveResult.hiveResultString(executedPlan2)
-    assert(result2 == timestamps.map(x => s"[$x]"))
+    withOutstandingZoneIds {
+      val timestamps = Seq(
+        "2018-12-28 01:02:03",
+        "1582-10-03 01:02:03",
+        "1582-10-04 01:02:03",
+        "1582-10-15 01:02:03")
+      val df = timestamps.toDF("a").selectExpr("cast(a as timestamp) as b")
+      val result = HiveResult.hiveResultString(df)
+      assert(result == timestamps)
+      val df2 = df.selectExpr("array(b)")
+      val result2 = HiveResult.hiveResultString(df2)
+      assert(result2 == timestamps.map(x => s"[$x]"))
+    }
   }
 
   test("toHiveString correctly handles UDTs") {
@@ -64,15 +75,14 @@ class HiveResultSuite extends SharedSparkSession {
   test("decimal formatting in hive result") {
     val df = Seq(new java.math.BigDecimal("1")).toDS()
     Seq(2, 6, 18).foreach { scala =>
-      val executedPlan =
decimal(38, $scala))").queryExecution.executedPlan - val result = HiveResult.hiveResultString(executedPlan) + val decimalDf = df.selectExpr(s"CAST(value AS decimal(38, $scala))") + val result = HiveResult.hiveResultString(decimalDf) assert(result.head.split("\\.").last.length === scala) } - val executedPlan = Seq(java.math.BigDecimal.ZERO).toDS() - .selectExpr(s"CAST(value AS decimal(38, 8))").queryExecution.executedPlan - val result = HiveResult.hiveResultString(executedPlan) + val df2 = Seq(java.math.BigDecimal.ZERO).toDS() + .selectExpr(s"CAST(value AS decimal(38, 8))") + val result = HiveResult.hiveResultString(df2) assert(result.head === "0.00000000") } @@ -83,8 +93,7 @@ class HiveResultSuite extends SharedSparkSession { withTable(s"$ns.$tbl") { spark.sql(s"CREATE TABLE $ns.$tbl (id bigint) USING $source") val df = spark.sql(s"SHOW TABLES FROM $ns") - val executedPlan = df.queryExecution.executedPlan - assert(HiveResult.hiveResultString(executedPlan).head == tbl) + assert(HiveResult.hiveResultString(df).head == tbl) } } } @@ -97,11 +106,10 @@ class HiveResultSuite extends SharedSparkSession { withTable(s"$ns.$tbl") { spark.sql(s"CREATE TABLE $ns.$tbl (id bigint COMMENT 'col1') USING $source") val df = spark.sql(s"DESCRIBE $ns.$tbl") - val executedPlan = df.queryExecution.executedPlan val expected = "id " + "\tbigint " + "\tcol1 " - assert(HiveResult.hiveResultString(executedPlan).head == expected) + assert(HiveResult.hiveResultString(df).head == expected) } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala index 53f9757750735..c3a1e3109d0d7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala @@ -144,15 +144,14 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession { withClue(s"Function '${info.getName}', Expression class '$className'") { val example = info.getExamples checkExampleSyntax(example) - example.split(" > ").toList.foreach { + example.split(" > ").toList.foreach(_ match { case exampleRe(sql, output) => val df = clonedSpark.sql(sql) - val actual = unindentAndTrim( - hiveResultString(df.queryExecution.executedPlan).mkString("\n")) + val actual = unindentAndTrim(hiveResultString(df).mkString("\n")) val expected = unindentAndTrim(output) assert(actual === expected) case _ => - } + }) } } } diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala index 12fba0eae6dce..64e91f405d613 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala @@ -60,9 +60,10 @@ private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlCont // TODO unify the error code try { context.sparkContext.setJobDescription(command) - val execution = context.sessionState.executePlan(context.sql(command).logicalPlan) + val df = context.sql(command) + val execution = df.queryExecution hiveResponse = SQLExecution.withNewExecutionId(execution) { - hiveResultString(execution.executedPlan) + hiveResultString(df) } tableSchema = getResultSetSchema(execution) new CommandProcessorResponse(0) diff --git 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 2e4c01830432f..a30fa576fc92d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -345,7 +345,9 @@ abstract class HiveComparisonTest extends SparkFunSuite with BeforeAndAfterAll {
     val catalystResults = queryList.zip(hiveResults).map { case (queryString, hive) =>
       val query = new TestHiveQueryExecution(queryString.replace("../../data", testDataPath))
       def getResult(): Seq[String] = {
-        SQLExecution.withNewExecutionId(query)(hiveResultString(query.executedPlan))
+        SQLExecution.withNewExecutionId(query) {
+          hiveResultString(Dataset.ofRows(query.sparkSession, query.logical))
+        }
       }
       try { (query, prepareAnswer(query, getResult())) } catch { case e: Throwable =>
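For reference, a downstream caller migrating to the new API only needs to pass the Dataset instead of its physical plan. A before/after sketch, assuming an arbitrary DataFrame `df` (not taken from this patch):

import org.apache.spark.sql.execution.HiveResult

// Before this patch: the caller had to dig out the executed plan.
// val answer = HiveResult.hiveResultString(df.queryExecution.executedPlan)

// After this patch: pass the Dataset; for non-command plans HiveResult re-collects it
// on a cloned session with spark.sql.datetime.java8API.enabled set to true.
val answer: Seq[String] = HiveResult.hiveResultString(df)

Commands such as SHOW TABLES and DESCRIBE still go through their dedicated branches, while all other plans take the cloned-session path shown in HiveResult.scala above.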