diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
index 4400bedfd5d49..1f8fa1e1b4c86 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
@@ -48,9 +48,13 @@ trait QueryErrorsBase {
     litToErrorValue(Literal.create(v, t))
   }
 
+  private def quoteByDefault(elem: String): String = {
+    "\"" + elem + "\""
+  }
+
   // Quote sql statements in error messages.
   def toSQLStmt(text: String): String = {
-    "\"" + text.toUpperCase(Locale.ROOT) + "\""
+    quoteByDefault(text.toUpperCase(Locale.ROOT))
   }
 
   def toSQLId(parts: Seq[String]): String = {
@@ -62,6 +66,10 @@ trait QueryErrorsBase {
   }
 
   def toSQLType(t: DataType): String = {
-    "\"" + t.sql + "\""
+    quoteByDefault(t.sql)
+  }
+
+  def toSQLConf(conf: String): String = {
+    quoteByDefault(conf)
   }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index 0fb8edf873d79..c7651d8029ebf 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -91,7 +91,8 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def castingCauseOverflowError(t: Any, dataType: DataType): ArithmeticException = {
     new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW",
-      messageParameters = Array(toSQLValue(t), toSQLType(dataType), SQLConf.ANSI_ENABLED.key))
+      messageParameters = Array(
+        toSQLValue(t), toSQLType(dataType), toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def cannotChangeDecimalPrecisionError(
@@ -99,9 +100,14 @@ object QueryExecutionErrors extends QueryErrorsBase {
       decimalPrecision: Int,
       decimalScale: Int,
       context: String): ArithmeticException = {
-    new SparkArithmeticException(errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
-      messageParameters = Array(value.toDebugString,
-        decimalPrecision.toString, decimalScale.toString, SQLConf.ANSI_ENABLED.key, context))
+    new SparkArithmeticException(
+      errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
+      messageParameters = Array(
+        value.toDebugString,
+        decimalPrecision.toString,
+        decimalScale.toString,
+        toSQLConf(SQLConf.ANSI_ENABLED.key),
+        context))
   }
 
   def invalidInputSyntaxForNumericError(
@@ -148,7 +154,8 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def divideByZeroError(context: String): ArithmeticException = {
     new SparkArithmeticException(
-      errorClass = "DIVIDE_BY_ZERO", messageParameters = Array(SQLConf.ANSI_ENABLED.key, context))
+      errorClass = "DIVIDE_BY_ZERO",
+      messageParameters = Array(toSQLConf(SQLConf.ANSI_ENABLED.key), context))
   }
 
   def invalidArrayIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
@@ -163,8 +170,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
       index: Int,
       numElements: Int,
       key: String): ArrayIndexOutOfBoundsException = {
-    new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX",
-      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), key))
+    new SparkArrayIndexOutOfBoundsException(
+      errorClass = "INVALID_ARRAY_INDEX",
+      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(key)))
   }
 
   def invalidElementAtIndexError(
@@ -173,7 +181,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
     new SparkArrayIndexOutOfBoundsException(
       errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
       messageParameters =
-        Array(toSQLValue(index), toSQLValue(numElements), SQLConf.ANSI_ENABLED.key))
+        Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def mapKeyNotExistError(key: Any, context: String): NoSuchElementException = {
@@ -186,8 +194,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
   }
 
   def invalidFractionOfSecondError(): DateTimeException = {
-    new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND",
-      Array(SQLConf.ANSI_ENABLED.key))
+    new SparkDateTimeException(
+      errorClass = "INVALID_FRACTION_OF_SECOND",
+      Array(toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def ansiDateTimeParseError(e: DateTimeParseException): DateTimeParseException = {
@@ -550,10 +559,10 @@ object QueryExecutionErrors extends QueryErrorsBase {
         |from $format files can be ambiguous, as the files may be written by
         |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar
         |that is different from Spark 3.0+'s Proleptic Gregorian calendar.
-        |See more details in SPARK-31404. You can set the SQL config '$config' or
+        |See more details in SPARK-31404. You can set the SQL config ${toSQLConf(config)} or
         |the datasource option '$option' to 'LEGACY' to rebase the datetime values
         |w.r.t. the calendar difference during reading. To read the datetime values
-        |as it is, set the SQL config '$config' or the datasource option '$option'
+        |as it is, set the SQL config ${toSQLConf(config)} or the datasource option '$option'
         |to 'CORRECTED'.
         |""".stripMargin),
       cause = null
@@ -566,16 +575,16 @@ object QueryExecutionErrors extends QueryErrorsBase {
       messageParameters = Array(
         "3.0",
         s"""
-          |writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z
-          |into $format files can be dangerous, as the files may be read by Spark 2.x
-          |or legacy versions of Hive later, which uses a legacy hybrid calendar that
-          |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more
-          |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the
-          |datetime values w.r.t. the calendar difference during writing, to get maximum
-          |interoperability. Or set $config to 'CORRECTED' to write the datetime values
-          |as it is, if you are 100% sure that the written files will only be read by
-          |Spark 3.0+ or other systems that use Proleptic Gregorian calendar.
-          |""".stripMargin),
+         |writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z
+         |into $format files can be dangerous, as the files may be read by Spark 2.x
+         |or legacy versions of Hive later, which uses a legacy hybrid calendar that
+         |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more
+         |details in SPARK-31404. You can set ${toSQLConf(config)} to 'LEGACY' to rebase the
+         |datetime values w.r.t. the calendar difference during writing, to get maximum
+         |interoperability. Or set ${toSQLConf(config)} to 'CORRECTED' to write the datetime
+         |values as it is, if you are 100% sure that the written files will only be read by
+         |Spark 3.0+ or other systems that use Proleptic Gregorian calendar.
+ |""".stripMargin), cause = null ) } diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out index 187630d78d2f0..9c659019ba2ef 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out @@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -195,7 +195,7 @@ select elt(4, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -204,7 +204,7 @@ select elt(0, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -213,7 +213,7 @@ select elt(-1, '123', '456') struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -254,7 +254,7 @@ select array(1, 2, 3)[5] struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: 5, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: 5, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -263,7 +263,7 @@ select array(1, 2, 3)[-1] struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: -1, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: -1, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. -- !query @@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5) struct<> -- !query output org.apache.spark.SparkArrayIndexOutOfBoundsException -Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error. +Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error. 
 -- !query
@@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -364,7 +364,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -373,7 +373,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -382,4 +382,4 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
index ec4770b950885..bfd0f41883c90 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
@@ -666,7 +666,7 @@ select cast('123.45' as decimal(4, 2))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select cast('123.45' as decimal(4, 2))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
index 8cd1f6fba3b90..608eac99b7c11 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
@@ -76,7 +76,7 @@ select (5e36BD + 0.1) + 5e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select (5e36BD + 0.1) + 5e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^
@@ -88,7 +88,7 @@ select (-4e36BD - 0.1) - 7e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select (-4e36BD - 0.1) - 7e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -100,7 +100,7 @@ select 12345678901234567890.0 * 12345678901234567890.0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select 12345678901234567890.0 * 12345678901234567890.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -112,7 +112,7 @@ select 1e35BD / 0.1
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select 1e35BD / 0.1
        ^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
index 30b14eceb70ce..ddd04d5bea67b 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
@@ -228,7 +228,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -264,7 +264,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
@@ -664,7 +664,7 @@ select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
index cf6af94818939..7a76e36846076 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
@@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007)
 struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
-The fraction of sec must be zero. Valid range is [0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+The fraction of sec must be zero. Valid range is [0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
index 58c86c9ccf65c..5e03105269bd5 100644
--- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
@@ -204,7 +204,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -240,7 +240,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
index 3d91e4290cc25..db34e6c71d9f2 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 26) ==
 SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 26) ==
 SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 30) ==
 SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
                               ^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
index 106a139c3b6a2..04c8837baac03 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
@@ -340,7 +340,7 @@ SELECT int(float('2147483647'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting 2.14748365E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting 2.14748365E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -357,7 +357,7 @@ SELECT int(float('-2147483900'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting -2.1474839E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting -2.1474839E9 to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -390,7 +390,7 @@ SELECT bigint(float('-9223380000000000000'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting -9.22338E18 to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting -9.22338E18 to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
index 57f97e3c242c7..75943e4010bea 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
@@ -845,7 +845,7 @@ SELECT bigint(double('-9223372036854780000'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting -9.22337203685478E18D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting -9.22337203685478E18D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
index 7939887b4e29e..35d72e2b6cec5 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
@@ -575,7 +575,7 @@ select bigint('9223372036854775800') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select bigint('9223372036854775800') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -587,7 +587,7 @@ select bigint('-9223372036854775808') / smallint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select bigint('-9223372036854775808') / smallint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -599,7 +599,7 @@ select smallint('100') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select smallint('100') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -619,7 +619,7 @@ SELECT CAST(q1 AS int) FROM int8_tbl WHERE q2 <> 456
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting 4567890123456789L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting 4567890123456789L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -636,7 +636,7 @@ SELECT CAST(q1 AS smallint) FROM int8_tbl WHERE q2 <> 456
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting 4567890123456789L to "SMALLINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting 4567890123456789L to "SMALLINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -673,7 +673,7 @@ SELECT CAST(double('922337203685477580700.0') AS bigint)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting 9.223372036854776E20D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting 9.223372036854776E20D to "BIGINT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -745,7 +745,7 @@ SELECT string(int(shiftleft(bigint(-1), 63))+1)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-Casting -9223372036854775808L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+Casting -9223372036854775808L to "INT" causes overflow. To return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
index 0f6492d0fb0ca..5c05026ec894e 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
                                    ^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
index 10e1cb7eeed48..68060936c9449 100644
--- a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
@@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007)
 struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
-The fraction of sec must be zero. Valid range is [0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+The fraction of sec must be zero. Valid range is [0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
index 8d4f15287e227..1e2f224fe0452 100755
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 31) ==
 SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                                ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 54) ==
 ...HEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END
                                    ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 34) ==
 ...LECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl
                                    ^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
index 9d30973d3143e..f054ef6ff022d 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2
                                    ^^^^^^^^
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 323b6d375dbd8..d8fc1a52c0bfa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -182,22 +182,22 @@ class QueryExecutionErrorsSuite extends QueryTest
       }.getCause.asInstanceOf[SparkUpgradeException]
 
       val format = "Parquet"
-      val config = SQLConf.PARQUET_REBASE_MODE_IN_READ.key
+      val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_READ.key + "\""
       val option = "datetimeRebaseMode"
       assert(e.getErrorClass === "INCONSISTENT_BEHAVIOR_CROSS_VERSION")
       assert(e.getMessage ===
         "You may get a different result due to the upgrading to Spark >= 3.0: " +
-        s"""
-          |reading dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z
-          |from $format files can be ambiguous, as the files may be written by
-          |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar
-          |that is different from Spark 3.0+'s Proleptic Gregorian calendar.
-          |See more details in SPARK-31404. You can set the SQL config '$config' or
-          |the datasource option '$option' to 'LEGACY' to rebase the datetime values
-          |w.r.t. the calendar difference during reading. To read the datetime values
-          |as it is, set the SQL config '$config' or the datasource option '$option'
-          |to 'CORRECTED'.
- |""".stripMargin) + s""" + |reading dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z + |from $format files can be ambiguous, as the files may be written by + |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar + |that is different from Spark 3.0+'s Proleptic Gregorian calendar. + |See more details in SPARK-31404. You can set the SQL config $config or + |the datasource option '$option' to 'LEGACY' to rebase the datetime values + |w.r.t. the calendar difference during reading. To read the datetime values + |as it is, set the SQL config $config or the datasource option '$option' + |to 'CORRECTED'. + |""".stripMargin) } // Fail to write ancient datetime values. @@ -209,21 +209,21 @@ class QueryExecutionErrorsSuite extends QueryTest }.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException] val format = "Parquet" - val config = SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key + val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key + "\"" assert(e.getErrorClass === "INCONSISTENT_BEHAVIOR_CROSS_VERSION") assert(e.getMessage === "You may get a different result due to the upgrading to Spark >= 3.0: " + - s""" - |writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z - |into $format files can be dangerous, as the files may be read by Spark 2.x - |or legacy versions of Hive later, which uses a legacy hybrid calendar that - |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more - |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the - |datetime values w.r.t. the calendar difference during writing, to get maximum - |interoperability. Or set $config to 'CORRECTED' to write the datetime values - |as it is, if you are 100% sure that the written files will only be read by - |Spark 3.0+ or other systems that use Proleptic Gregorian calendar. - |""".stripMargin) + s""" + |writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z + |into $format files can be dangerous, as the files may be read by Spark 2.x + |or legacy versions of Hive later, which uses a legacy hybrid calendar that + |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more + |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the + |datetime values w.r.t. the calendar difference during writing, to get maximum + |interoperability. Or set $config to 'CORRECTED' to write the datetime + |values as it is, if you are 100% sure that the written files will only be read by + |Spark 3.0+ or other systems that use Proleptic Gregorian calendar. + |""".stripMargin) } } }