From 45d57085490f7a17eb42ea99498604893c4f9907 Mon Sep 17 00:00:00 2001 From: Max Gekk Date: Sun, 24 Apr 2022 19:35:36 +0300 Subject: [PATCH 1/5] Add toSQLConf() --- .../apache/spark/sql/errors/QueryErrorsBase.scala | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala index 4400bedfd5d49..1f8fa1e1b4c86 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala @@ -48,9 +48,13 @@ trait QueryErrorsBase { litToErrorValue(Literal.create(v, t)) } + private def quoteByDefault(elem: String): String = { + "\"" + elem + "\"" + } + // Quote sql statements in error messages. def toSQLStmt(text: String): String = { - "\"" + text.toUpperCase(Locale.ROOT) + "\"" + quoteByDefault(text.toUpperCase(Locale.ROOT)) } def toSQLId(parts: Seq[String]): String = { @@ -62,6 +66,10 @@ trait QueryErrorsBase { } def toSQLType(t: DataType): String = { - "\"" + t.sql + "\"" + quoteByDefault(t.sql) + } + + def toSQLConf(conf: String): String = { + quoteByDefault(conf) } } From 0f0dcfbd322c6380d0831968b319d4db681115ed Mon Sep 17 00:00:00 2001 From: Max Gekk Date: Sun, 24 Apr 2022 19:36:09 +0300 Subject: [PATCH 2/5] Apply toSQLConf() to QueryExecutionErrorsSuite --- .../spark/sql/errors/QueryExecutionErrors.scala | 10 +++++----- .../spark/sql/errors/QueryExecutionErrorsSuite.scala | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala index c73b78b264c55..e57977d4120c0 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala @@ -521,10 +521,10 @@ object QueryExecutionErrors extends QueryErrorsBase { |from $format files can be ambiguous, as the files may be written by |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar |that is different from Spark 3.0+'s Proleptic Gregorian calendar. - |See more details in SPARK-31404. You can set the SQL config '$config' or + |See more details in SPARK-31404. You can set the SQL config ${toSQLConf(config)} or |the datasource option '$option' to 'LEGACY' to rebase the datetime values |w.r.t. the calendar difference during reading. To read the datetime values - |as it is, set the SQL config '$config' or the datasource option '$option' + |as it is, set the SQL config ${toSQLConf(config)} or the datasource option '$option' |to 'CORRECTED'. |""".stripMargin), cause = null @@ -541,10 +541,10 @@ object QueryExecutionErrors extends QueryErrorsBase { |into $format files can be dangerous, as the files may be read by Spark 2.x |or legacy versions of Hive later, which uses a legacy hybrid calendar that |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more - |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the + |details in SPARK-31404. You can set ${toSQLConf(config)} to 'LEGACY' to rebase the |datetime values w.r.t. the calendar difference during writing, to get maximum - |interoperability. 
Or set $config to 'CORRECTED' to write the datetime values - |as it is, if you are 100% sure that the written files will only be read by + |interoperability. Or set ${toSQLConf(config)} to 'CORRECTED' to write the datetime + |values as it is, if you are 100% sure that the written files will only be read by |Spark 3.0+ or other systems that use Proleptic Gregorian calendar. |""".stripMargin), cause = null diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala index 418b5d211d6c2..f84f159f6f0da 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala @@ -206,7 +206,7 @@ class QueryExecutionErrorsSuite }.getCause.asInstanceOf[SparkUpgradeException] val format = "Parquet" - val config = SQLConf.PARQUET_REBASE_MODE_IN_READ.key + val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_READ.key + "\"" val option = "datetimeRebaseMode" checkErrorClass( exception = e, @@ -218,10 +218,10 @@ class QueryExecutionErrorsSuite |from $format files can be ambiguous, as the files may be written by |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar |that is different from Spark 3.0+'s Proleptic Gregorian calendar. - |See more details in SPARK-31404. You can set the SQL config '$config' or + |See more details in SPARK-31404. You can set the SQL config $config or |the datasource option '$option' to 'LEGACY' to rebase the datetime values |w.r.t. the calendar difference during reading. To read the datetime values - |as it is, set the SQL config '$config' or the datasource option '$option' + |as it is, set the SQL config $config or the datasource option '$option' |to 'CORRECTED'. |""".stripMargin) } @@ -235,7 +235,7 @@ class QueryExecutionErrorsSuite }.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException] val format = "Parquet" - val config = SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key + val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key + "\"" checkErrorClass( exception = e, errorClass = "INCONSISTENT_BEHAVIOR_CROSS_VERSION", @@ -248,8 +248,8 @@ class QueryExecutionErrorsSuite |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the |datetime values w.r.t. the calendar difference during writing, to get maximum - |interoperability. Or set $config to 'CORRECTED' to write the datetime values - |as it is, if you are 100% sure that the written files will only be read by + |interoperability. Or set $config to 'CORRECTED' to write the datetime + |values as it is, if you are 100% sure that the written files will only be read by |Spark 3.0+ or other systems that use Proleptic Gregorian calendar. 
|""".stripMargin) } From fd79792d152bb890de1230561c60a7cdaacd9fc4 Mon Sep 17 00:00:00 2001 From: Max Gekk Date: Sun, 24 Apr 2022 19:45:37 +0300 Subject: [PATCH 3/5] Apply toSQLConf() to QueryExecutionAnsiErrorsSuite --- .../sql/errors/QueryExecutionErrors.scala | 29 ++++++++++++------- .../QueryExecutionAnsiErrorsSuite.scala | 14 +++++---- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala index e57977d4120c0..591726829255a 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala @@ -91,7 +91,8 @@ object QueryExecutionErrors extends QueryErrorsBase { def castingCauseOverflowError(t: Any, dataType: DataType): ArithmeticException = { new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW", - messageParameters = Array(toSQLValue(t), toSQLType(dataType), SQLConf.ANSI_ENABLED.key)) + messageParameters = Array( + toSQLValue(t), toSQLType(dataType), toSQLConf(SQLConf.ANSI_ENABLED.key))) } def cannotChangeDecimalPrecisionError( @@ -99,9 +100,14 @@ object QueryExecutionErrors extends QueryErrorsBase { decimalPrecision: Int, decimalScale: Int, context: String): ArithmeticException = { - new SparkArithmeticException(errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION", - messageParameters = Array(value.toDebugString, - decimalPrecision.toString, decimalScale.toString, SQLConf.ANSI_ENABLED.key, context)) + new SparkArithmeticException( + errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION", + messageParameters = Array( + value.toDebugString, + decimalPrecision.toString, + decimalScale.toString, + toSQLConf(SQLConf.ANSI_ENABLED.key), + context)) } def invalidInputSyntaxForNumericError( @@ -148,7 +154,8 @@ object QueryExecutionErrors extends QueryErrorsBase { def divideByZeroError(context: String): ArithmeticException = { new SparkArithmeticException( - errorClass = "DIVIDE_BY_ZERO", messageParameters = Array(SQLConf.ANSI_ENABLED.key, context)) + errorClass = "DIVIDE_BY_ZERO", + messageParameters = Array(toSQLConf(SQLConf.ANSI_ENABLED.key), context)) } def invalidArrayIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = { @@ -163,8 +170,9 @@ object QueryExecutionErrors extends QueryErrorsBase { index: Int, numElements: Int, key: String): ArrayIndexOutOfBoundsException = { - new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX", - messageParameters = Array(toSQLValue(index), toSQLValue(numElements), key)) + new SparkArrayIndexOutOfBoundsException( + errorClass = "INVALID_ARRAY_INDEX", + messageParameters = Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(key))) } def invalidElementAtIndexError( @@ -173,7 +181,7 @@ object QueryExecutionErrors extends QueryErrorsBase { new SparkArrayIndexOutOfBoundsException( errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT", messageParameters = - Array(toSQLValue(index), toSQLValue(numElements), SQLConf.ANSI_ENABLED.key)) + Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(SQLConf.ANSI_ENABLED.key))) } def mapKeyNotExistError(key: Any, context: String): NoSuchElementException = { @@ -182,8 +190,9 @@ object QueryExecutionErrors extends QueryErrorsBase { } def invalidFractionOfSecondError(): DateTimeException = { - new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND", - 
Array(SQLConf.ANSI_ENABLED.key)) + new SparkDateTimeException( + errorClass = "INVALID_FRACTION_OF_SECOND", + Array(toSQLConf(SQLConf.ANSI_ENABLED.key))) } def ansiDateTimeParseError(e: DateTimeParseException): DateTimeParseException = { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala index fa44036f90ce9..b49440a770e62 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala @@ -24,6 +24,8 @@ import org.apache.spark.sql.internal.SQLConf class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase { override def sparkConf: SparkConf = super.sparkConf.set(SQLConf.ANSI_ENABLED.key, "true") + private val ansiConf = "\"" + SQLConf.ANSI_ENABLED.key + "\"" + test("CAST_CAUSES_OVERFLOW: from timestamp to int") { checkErrorClass( exception = intercept[SparkArithmeticException] { @@ -33,7 +35,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase msg = "Casting 253402258394567890L to \"INT\" causes overflow. " + "To return NULL instead, use 'try_cast'. " + - "If necessary set spark.sql.ansi.enabled to false to bypass this error.", + s"If necessary set $ansiConf to false to bypass this error.", sqlState = Some("22005")) } @@ -45,7 +47,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase errorClass = "DIVIDE_BY_ZERO", msg = "divide by zero. To return NULL instead, use 'try_divide'. If necessary set " + - "spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error." + + s"$ansiConf to false (except for ANSI interval type) to bypass this error." + """ |== SQL(line 1, position 7) == |select 6/0 @@ -61,7 +63,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase }, errorClass = "INVALID_FRACTION_OF_SECOND", msg = "The fraction of sec must be zero. Valid range is [0, 60]. " + - "If necessary set spark.sql.ansi.enabled to false to bypass this error. ", + s"If necessary set $ansiConf to false to bypass this error. ", sqlState = Some("22023")) } @@ -73,7 +75,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION", msg = "Decimal(expanded,66666666666666.666,17,3}) cannot be represented as Decimal(8, 1). " + - "If necessary set spark.sql.ansi.enabled to false to bypass this error." + + s"If necessary set $ansiConf to false to bypass this error." + """ |== SQL(line 1, position 7) == |select CAST('66666666666666.666' AS DECIMAL(8, 1)) @@ -89,7 +91,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase }, errorClass = "INVALID_ARRAY_INDEX", msg = "Invalid index: 8, numElements: 5. " + - "If necessary set spark.sql.ansi.enabled to false to bypass this error." + s"If necessary set $ansiConf to false to bypass this error." ) } @@ -101,7 +103,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT", msg = "Invalid index: 8, numElements: 5. " + "To return NULL instead, use 'try_element_at'. " + - "If necessary set spark.sql.ansi.enabled to false to bypass this error." + s"If necessary set $ansiConf to false to bypass this error." 
) } } From 16e4bbadb47767c4ecde42340a20a4eda780c117 Mon Sep 17 00:00:00 2001 From: Max Gekk Date: Sun, 24 Apr 2022 20:35:21 +0300 Subject: [PATCH 4/5] Use quote for a config in the json file --- core/src/main/resources/error/error-classes.json | 2 +- .../apache/spark/sql/errors/QueryCompilationErrorsSuite.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json index d6e510f659c99..9cb4cb222aa42 100644 --- a/core/src/main/resources/error/error-classes.json +++ b/core/src/main/resources/error/error-classes.json @@ -197,7 +197,7 @@ "message" : [ "The operation is not supported: " ] }, "UNTYPED_SCALA_UDF" : { - "message" : [ "You're using untyped Scala UDF, which does not have the input type information. Spark may blindly pass null to the Scala closure with primitive-type argument, and the closure will see the default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. To get rid of this error, you could:\n1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { override def call(s: String): Integer = s.length() }, IntegerType)`, if input types are all non primitive\n3. set spark.sql.legacy.allowUntypedScalaUDF to true and use this API with caution" ] + "message" : [ "You're using untyped Scala UDF, which does not have the input type information. Spark may blindly pass null to the Scala closure with primitive-type argument, and the closure will see the default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. To get rid of this error, you could:\n1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { override def call(s: String): Integer = s.length() }, IntegerType)`, if input types are all non primitive\n3. set \"spark.sql.legacy.allowUntypedScalaUDF\" to true and use this API with caution" ] }, "WRITING_JOB_ABORTED" : { "message" : [ "Writing job aborted" ], diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala index 66d7465c18412..8b63ba52ab896 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala @@ -227,7 +227,7 @@ class QueryCompilationErrorsSuite "2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { " + "override def call(s: String): Integer = s.length() }, IntegerType)`, " + "if input types are all non primitive\n" + - s"3. set ${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key} to true and " + + s"""3. 
set "${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key}" to true and """ + s"use this API with caution") } From 9950c41f4ed550426605383911b64402968ceeb8 Mon Sep 17 00:00:00 2001 From: Max Gekk Date: Mon, 25 Apr 2022 08:55:39 +0300 Subject: [PATCH 5/5] Re-gen sql.out --- .../sql-tests/results/ansi/array.sql.out | 24 +++++++++---------- .../sql-tests/results/ansi/cast.sql.out | 2 +- .../ansi/decimalArithmeticOperations.sql.out | 8 +++---- .../sql-tests/results/ansi/interval.sql.out | 6 ++--- .../sql-tests/results/ansi/timestamp.sql.out | 2 +- .../sql-tests/results/interval.sql.out | 4 ++-- .../sql-tests/results/postgreSQL/case.sql.out | 6 ++--- .../results/postgreSQL/float4.sql.out | 6 ++--- .../results/postgreSQL/float8.sql.out | 2 +- .../sql-tests/results/postgreSQL/int8.sql.out | 14 +++++------ .../results/postgreSQL/select_having.sql.out | 2 +- .../timestampNTZ/timestamp-ansi.sql.out | 2 +- .../results/udf/postgreSQL/udf-case.sql.out | 6 ++--- .../udf/postgreSQL/udf-select_having.sql.out | 2 +- 14 files changed, 43 insertions(+), 43 deletions(-) diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out index accc1f239bec1..fb148bbbe19e6 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out @@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -195,7 +195,7 @@ select elt(4, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -204,7 +204,7 @@ select elt(0, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -213,7 +213,7 @@ select elt(-1, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. 
If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -254,7 +254,7 @@ select array(1, 2, 3)[5] struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -263,7 +263,7 @@ select array(1, 2, 3)[-1] struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -364,7 +364,7 @@ select elt(4, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -373,7 +373,7 @@ select elt(0, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -382,4 +382,4 @@ select elt(-1, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. 
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out index 114c730737060..96db4f2db4240 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out @@ -666,7 +666,7 @@ select cast('123.45' as decimal(4, 2)) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select cast('123.45' as decimal(4, 2)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out index 94b52d3afc183..1640875973ead 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out @@ -76,7 +76,7 @@ select (5e36BD + 0.1) + 5e36BD struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select (5e36BD + 0.1) + 5e36BD ^^^^^^^^^^^^^^^^^^^^^^^ @@ -88,7 +88,7 @@ select (-4e36BD - 0.1) - 7e36BD struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select (-4e36BD - 0.1) - 7e36BD ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -100,7 +100,7 @@ select 12345678901234567890.0 * 12345678901234567890.0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select 12345678901234567890.0 * 12345678901234567890.0 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -112,7 +112,7 @@ select 1e35BD / 0.1 struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). 
If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select 1e35BD / 0.1 ^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out index f5687f22b2d6b..94d693545461a 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out @@ -228,7 +228,7 @@ select interval '2 seconds' / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2 seconds' / 0 ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -264,7 +264,7 @@ select interval '2' year / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2' year / 0 ^^^^^^^^^^^^^^^^^^^^^ @@ -664,7 +664,7 @@ select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error. == SQL(line 1, position 7) == select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out index 16255ae3fde69..5183a4d9a7c0c 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out @@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007) struct<> -- !query output org.apache.spark.SparkDateTimeException -[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. 
-- !query diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/interval.sql.out index b1baa7ac3927f..19412d04194bc 100644 --- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out @@ -204,7 +204,7 @@ select interval '2 seconds' / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2 seconds' / 0 ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -240,7 +240,7 @@ select interval '2' year / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2' year / 0 ^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out index 93b9deb9520ee..8932672d7a207 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out @@ -179,7 +179,7 @@ SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 26) == SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END ^^^ @@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 26) == SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END ^^^ @@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. 
== SQL(line 1, position 30) == SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out index 905cf2c4c358a..94b4ce3bb2faf 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out @@ -340,7 +340,7 @@ SELECT int(float('2147483647')) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting 2.14748365E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting 2.14748365E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -357,7 +357,7 @@ SELECT int(float('-2147483900')) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting -2.1474839E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting -2.1474839E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -390,7 +390,7 @@ SELECT bigint(float('-9223380000000000000')) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting -9.22338E18 to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting -9.22338E18 to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out index 88d9565f0fb12..3cdbe4c4f9a40 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out @@ -845,7 +845,7 @@ SELECT bigint(double('-9223372036854780000')) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting -9.22337203685478E18D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting -9.22337203685478E18D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out index 157cd39d767c8..54b3c4410acd4 100755 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out @@ -575,7 +575,7 @@ select bigint('9223372036854775800') / bigint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. 
To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select bigint('9223372036854775800') / bigint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -587,7 +587,7 @@ select bigint('-9223372036854775808') / smallint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select bigint('-9223372036854775808') / smallint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -599,7 +599,7 @@ select smallint('100') / bigint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select smallint('100') / bigint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -619,7 +619,7 @@ SELECT CAST(q1 AS int) FROM int8_tbl WHERE q2 <> 456 struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -636,7 +636,7 @@ SELECT CAST(q1 AS smallint) FROM int8_tbl WHERE q2 <> 456 struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "SMALLINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "SMALLINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -673,7 +673,7 @@ SELECT CAST(double('922337203685477580700.0') AS bigint) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting 9.223372036854776E20D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting 9.223372036854776E20D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -745,7 +745,7 @@ SELECT string(int(shiftleft(bigint(-1), 63))+1) struct<> -- !query output org.apache.spark.SparkArithmeticException -[CAST_CAUSES_OVERFLOW] Casting -9223372036854775808L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[CAST_CAUSES_OVERFLOW] Casting -9223372036854775808L to "INT" causes overflow. To return NULL instead, use 'try_cast'. 
If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out index 43e4de5cb58f0..618f57b1cf059 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out @@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 39) == ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2 ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out index 0911d814b342a..920f3a7462b51 100644 --- a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out @@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007) struct<> -- !query output org.apache.spark.SparkDateTimeException -[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error. +[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out index cd1e2306cad5c..b6591be87a8f2 100755 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out @@ -179,7 +179,7 @@ SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 31) == SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END ^^^ @@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. 
== SQL(line 1, position 54) == ...HEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END ^^^ @@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 34) == ...LECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out index a6ade1ea1599f..60043b7b01d6a 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out @@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 39) == ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2 ^^^^^^^^
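
For reference only, and not part of the patch series above: a minimal, self-contained Scala sketch of the quoting convention that the new QueryErrorsBase.toSQLConf() helper applies to SQL config keys in error messages. The two helper bodies are copied from PATCH 1/5; the object name ToSQLConfSketch, the local val ansiConf, and the main() entry point are hypothetical scaffolding added only so the snippet runs standalone.

object ToSQLConfSketch {
  // Same body as the private quoteByDefault() added to QueryErrorsBase in PATCH 1/5:
  // wrap the given element in double quotes.
  private def quoteByDefault(elem: String): String = "\"" + elem + "\""

  // Same body as toSQLConf() added to QueryErrorsBase in PATCH 1/5.
  def toSQLConf(conf: String): String = quoteByDefault(conf)

  def main(args: Array[String]): Unit = {
    val ansiConf = "spark.sql.ansi.enabled"
    // Prints: If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
    // This matches the re-generated golden files in PATCH 5/5.
    println(s"If necessary set ${toSQLConf(ansiConf)} to false to bypass this error.")
  }
}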