diff --git a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
index 8ea5f07c528f..722c044ab409 100644
--- a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
@@ -4263,6 +4263,7 @@ object functions {
*/
def to_binary(e: Column): Column = Column.fn("to_binary", e)
+ // scalastyle:off line.size.limit
/**
* Convert `e` to a string based on the `format`. Throws an exception if the conversion fails.
*
@@ -4283,13 +4284,20 @@ object functions {
* (optional, only allowed once at the beginning or end of the format string). Note that 'S'
 * prints '+' for positive values but 'MI' prints a space. 'PR': Only allowed at the
* end of the format string; specifies that the result string will be wrapped by angle
- * brackets if the input value is negative.
+   * brackets if the input value is negative. If `e` is a datetime, `format` shall be
+   * a valid datetime pattern, see <a
+   * href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime
+   * Patterns</a>. If `e` is a binary, it is converted to a string in one of the formats:
+   * - 'base64': a base 64 string.
+   * - 'hex': a string in the hexadecimal format.
+   * - 'utf-8': the input binary is decoded to UTF-8 string.
*
* @group string_funcs
* @since 3.5.0
*/
+ // scalastyle:on line.size.limit
def to_char(e: Column, format: Column): Column = Column.fn("to_char", e, format)
+ // scalastyle:off line.size.limit
/**
* Convert `e` to a string based on the `format`. Throws an exception if the conversion fails.
*
@@ -4310,11 +4318,17 @@ object functions {
* (optional, only allowed once at the beginning or end of the format string). Note that 'S'
* prints '+' for positive values but 'MI' prints a space. 'PR': Only allowed at the
* end of the format string; specifies that the result string will be wrapped by angle
- * brackets if the input value is negative.
+   * brackets if the input value is negative. If `e` is a datetime, `format` shall be
+   * a valid datetime pattern, see <a
+   * href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime
+   * Patterns</a>. If `e` is a binary, it is converted to a string in one of the formats:
+   * - 'base64': a base 64 string.
+   * - 'hex': a string in the hexadecimal format.
+   * - 'utf-8': the input binary is decoded to UTF-8 string.
*
* @group string_funcs
* @since 3.5.0
*/
+ // scalastyle:on line.size.limit
def to_varchar(e: Column, format: Column): Column = Column.fn("to_varchar", e, format)
/**
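
The datetime branch added to these Scaladocs is easiest to see with a concrete call. The sketch below is not part of the patch; it assumes a Spark 3.5.0+ spark-shell session (so `spark` and its implicits are predefined), and the DataFrame, column, and alias names are illustrative.

```scala
// Hedged sketch, not part of this patch: with a datetime input, the second
// argument of to_char is interpreted as a datetime pattern.
import org.apache.spark.sql.functions.{col, lit, to_char}

val tsDf = Seq("2023-07-20 12:34:56").toDF("s")
  .select(col("s").cast("timestamp").alias("ts"))

tsDf
  .select(to_char(col("ts"), lit("yyyy-MM-dd HH:mm")).alias("formatted"))
  .show(false)
// Expected single value: 2023-07-20 12:34
```
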
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index fb02cb0cc98b..f5a5b2836266 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -10423,6 +10423,12 @@ def to_char(col: "ColumnOrName", format: "ColumnOrName") -> Column:
values but 'MI' prints a space.
'PR': Only allowed at the end of the format string; specifies that the result string
will be wrapped by angle brackets if the input value is negative.
+ If `col` is a datetime, `format` shall be a valid datetime pattern, see
+    `Datetime Patterns <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_.
+ If `col` is a binary, it is converted to a string in one of the formats:
+ 'base64': a base 64 string.
+ 'hex': a string in the hexadecimal format.
+ 'utf-8': the input binary is decoded to UTF-8 string.
.. versionadded:: 3.5.0
@@ -10463,6 +10469,12 @@ def to_varchar(col: "ColumnOrName", format: "ColumnOrName") -> Column:
values but 'MI' prints a space.
'PR': Only allowed at the end of the format string; specifies that the result string
will be wrapped by angle brackets if the input value is negative.
+ If `col` is a datetime, `format` shall be a valid datetime pattern, see
+    `Datetime Patterns <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_.
+ If `col` is a binary, it is converted to a string in one of the formats:
+ 'base64': a base 64 string.
+ 'hex': a string in the hexadecimal format.
+ 'utf-8': the input binary is decoded to UTF-8 string.
.. versionadded:: 3.5.0
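
The same three binary formats are documented for the PySpark wrappers above. As a rough illustration (kept in Scala to match the other sketches here, not part of the patch; assumes a Spark 3.5.0+ spark-shell with its predefined implicits, and illustrative column names):

```scala
// Hedged sketch, not part of this patch: with a binary input, the format must be
// one of 'base64', 'hex', or 'utf-8'.
import org.apache.spark.sql.functions.{col, lit, to_char}

val binDf = Seq("abc").toDF("s")
  .select(col("s").cast("binary").alias("b"))

binDf.select(
  to_char(col("b"), lit("base64")).alias("b64"),  // YWJj
  to_char(col("b"), lit("hex")).alias("hexed"),   // 616263
  to_char(col("b"), lit("utf-8")).alias("utf8")   // abc
).show()
```
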
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 9548f424ad40..4d32f297a986 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -4399,6 +4399,7 @@ object functions {
new ToBinary(e.expr)
}
+ // scalastyle:off line.size.limit
/**
* Convert `e` to a string based on the `format`.
* Throws an exception if the conversion fails. The format can consist of the following
@@ -4420,11 +4421,20 @@ object functions {
* 'PR': Only allowed at the end of the format string; specifies that the result string will be
* wrapped by angle brackets if the input value is negative.
*
+ * If `e` is a datetime, `format` shall be a valid datetime pattern, see
+   * <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>.
+ * If `e` is a binary, it is converted to a string in one of the formats:
+ * 'base64': a base 64 string.
+ * 'hex': a string in the hexadecimal format.
+ * 'utf-8': the input binary is decoded to UTF-8 string.
+ *
* @group string_funcs
* @since 3.5.0
*/
+ // scalastyle:on line.size.limit
def to_char(e: Column, format: Column): Column = call_function("to_char", e, format)
+ // scalastyle:off line.size.limit
/**
* Convert `e` to a string based on the `format`.
* Throws an exception if the conversion fails. The format can consist of the following
@@ -4446,9 +4456,17 @@ object functions {
* 'PR': Only allowed at the end of the format string; specifies that the result string will be
* wrapped by angle brackets if the input value is negative.
*
+ * If `e` is a datetime, `format` shall be a valid datetime pattern, see
+   * <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>.
+ * If `e` is a binary, it is converted to a string in one of the formats:
+ * 'base64': a base 64 string.
+ * 'hex': a string in the hexadecimal format.
+ * 'utf-8': the input binary is decoded to UTF-8 string.
+ *
* @group string_funcs
* @since 3.5.0
*/
+ // scalastyle:on line.size.limit
def to_varchar(e: Column, format: Column): Column = call_function("to_varchar", e, format)
/**
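
`to_varchar` carries the same documentation because it behaves as an alias of `to_char`. A hedged numeric sketch under the same spark-shell assumption, reusing the value/format pair from the existing PySpark doctest (78.12 with '$99.99'); the DataFrame and alias names are illustrative:

```scala
// Hedged sketch, not part of this patch: to_varchar accepts the same numeric
// format tokens ('0'/'9', '.', ',', '$', 'S'/'MI'/'PR') as to_char.
import org.apache.spark.sql.functions.{col, lit, to_varchar}

val numDf = Seq(78.12).toDF("v")

numDf
  .select(to_varchar(col("v"), lit("$99.99")).alias("formatted"))
  .show()
// Expected single value: $78.12
```
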