From 1816db5993071413a2708b25ea5b34ec925c3312 Mon Sep 17 00:00:00 2001 From: yangjie01 Date: Sun, 31 Dec 2023 01:53:14 +0800 Subject: [PATCH 1/3] init --- python/pyspark/sql/functions/builtin.py | 198 ++++++++++++++++++------ 1 file changed, 150 insertions(+), 48 deletions(-) diff --git a/python/pyspark/sql/functions/builtin.py b/python/pyspark/sql/functions/builtin.py index 0ff1ee2a7394a..458bb8c8feaf4 100644 --- a/python/pyspark/sql/functions/builtin.py +++ b/python/pyspark/sql/functions/builtin.py @@ -12810,7 +12810,7 @@ def try_element_at(col: "ColumnOrName", extraction: "ColumnOrName") -> Column: @_try_remote_functions def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column: """ - Collection function: Returns element of array at given (0-based) index. + Array function: Returns the element of an array at the given (0-based) index. If the index points outside of the array boundaries, then this function returns NULL. @@ -12819,18 +12819,18 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column: Parameters ---------- col : :class:`~pyspark.sql.Column` or str - name of column containing array + Name of the column containing the array. index : :class:`~pyspark.sql.Column` or str or int - index to check for in array + Index to check for in the array. Returns ------- :class:`~pyspark.sql.Column` - value at given position. + Value at the given position. Notes ----- - The position is not 1 based, but 0 based index. + The position is not 1-based, but 0-based index. Supports Spark Connect. See Also @@ -12839,41 +12839,61 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column: Examples -------- - >>> df = spark.createDataFrame([(["a", "b", "c"], 1)], ['data', 'index']) - >>> df.select(get(df.data, 1)).show() + Example 1: Getting an element at a fixed position + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([(["a", "b", "c"],)], ['data']) + >>> df.select(sf.get(df.data, 1)).show() +------------+ |get(data, 1)| +------------+ | b| +------------+ - >>> df.select(get(df.data, -1)).show() - +-------------+ - |get(data, -1)| - +-------------+ - | NULL| - +-------------+ + Example 2: Getting an element at a position outside the array boundaries - >>> df.select(get(df.data, 3)).show() + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([(["a", "b", "c"],)], ['data']) + >>> df.select(sf.get(df.data, 3)).show() +------------+ |get(data, 3)| +------------+ | NULL| +------------+ - >>> df.select(get(df.data, "index")).show() + Example 3: Getting an element at a position specified by another column + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([(["a", "b", "c"], 2)], ['data', 'index']) + >>> df.select(sf.get(df.data, df.index)).show() +----------------+ |get(data, index)| +----------------+ - | b| + | c| +----------------+ - >>> df.select(get(df.data, col("index") - 1)).show() + + Example 4: Getting an element at a position calculated from another column + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([(["a", "b", "c"], 2)], ['data', 'index']) + >>> df.select(sf.get(df.data, df.index - 1)).show() +----------------------+ |get(data, (index - 1))| +----------------------+ - | a| + | b| +----------------------+ + + Example 5: Getting an element at a negative position + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([(["a", "b", "c"], )], ['data']) + >>> df.select(sf.get(df.data, -1)).show() + 
+-------------+ + |get(data, -1)| + +-------------+ + | NULL| + +-------------+ """ index = lit(index) if isinstance(index, int) else index @@ -15064,7 +15084,7 @@ def cardinality(col: "ColumnOrName") -> Column: @_try_remote_functions def sort_array(col: "ColumnOrName", asc: bool = True) -> Column: """ - Collection function: sorts the input array in ascending or descending order according + Array function: Sorts the input array in ascending or descending order according to the natural ordering of the array elements. Null elements will be placed at the beginning of the returned array in ascending order or at the end of the returned array in descending order. @@ -15077,23 +15097,76 @@ def sort_array(col: "ColumnOrName", asc: bool = True) -> Column: Parameters ---------- col : :class:`~pyspark.sql.Column` or str - name of column or expression + Name of the column or expression. asc : bool, optional - whether to sort in ascending or descending order. If `asc` is True (default) - then ascending and if False then descending. + Whether to sort in ascending or descending order. If `asc` is True (default), + then the sorting is in ascending order. If False, then in descending order. Returns ------- :class:`~pyspark.sql.Column` - sorted array. + Sorted array. Examples -------- - >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data']) - >>> df.select(sort_array(df.data).alias('r')).collect() - [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])] - >>> df.select(sort_array(df.data, asc=False).alias('r')).collect() - [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])] + Example 1: Sorting an array in ascending order + + >>> import pyspark.sql.functions as sf + >>> df = spark.createDataFrame([([2, 1, None, 3],)], ['data']) + >>> df.select(sf.sort_array(df.data)).show() + +----------------------+ + |sort_array(data, true)| + +----------------------+ + | [NULL, 1, 2, 3]| + +----------------------+ + + Example 2: Sorting an array in descending order + + >>> import pyspark.sql.functions as sf + >>> df = spark.createDataFrame([([2, 1, None, 3],)], ['data']) + >>> df.select(sf.sort_array(df.data, asc=False)).show() + +-----------------------+ + |sort_array(data, false)| + +-----------------------+ + | [3, 2, 1, NULL]| + +-----------------------+ + + Example 3: Sorting an array with a single element + + >>> import pyspark.sql.functions as sf + >>> df = spark.createDataFrame([([1],)], ['data']) + >>> df.select(sf.sort_array(df.data)).show() + +----------------------+ + |sort_array(data, true)| + +----------------------+ + | [1]| + +----------------------+ + + Example 4: Sorting an empty array + + >>> from pyspark.sql import functions as sf + >>> from pyspark.sql.types import ArrayType, StringType, StructField, StructType + >>> schema = StructType([StructField("data", ArrayType(StringType()), True)]) + >>> df = spark.createDataFrame([([],)], schema=schema) + >>> df.select(sf.sort_array(df.data)).show() + +----------------------+ + |sort_array(data, true)| + +----------------------+ + | []| + +----------------------+ + + Example 5: Sorting an array with null values + + >>> from pyspark.sql import functions as sf + >>> from pyspark.sql.types import ArrayType, IntegerType, StructType, StructField + >>> schema = StructType([StructField("data", ArrayType(IntegerType()), True)]) + >>> df = spark.createDataFrame([([None, None, None],)], schema=schema) + >>> df.select(sf.sort_array(df.data)).show() + +----------------------+ + |sort_array(data, true)| + +----------------------+ + | [NULL, NULL, NULL]| + 
+----------------------+ """ return _invoke_function("sort_array", _to_java_column(col), asc) @@ -15523,9 +15596,9 @@ def array_repeat(col: "ColumnOrName", count: Union["ColumnOrName", int]) -> Colu @_try_remote_functions def arrays_zip(*cols: "ColumnOrName") -> Column: """ - Collection function: Returns a merged array of structs in which the N-th struct contains all + Array function: Returns a merged array of structs in which the N-th struct contains all N-th values of input arrays. If one of the arrays is shorter than others then - resulting struct type value will be a `null` for missing elements. + the resulting struct type value will be a `null` for missing elements. .. versionadded:: 2.4.0 @@ -15535,31 +15608,60 @@ def arrays_zip(*cols: "ColumnOrName") -> Column: Parameters ---------- cols : :class:`~pyspark.sql.Column` or str - columns of arrays to be merged. + Columns of arrays to be merged. Returns ------- :class:`~pyspark.sql.Column` - merged array of entries. + Merged array of entries. Examples -------- - >>> from pyspark.sql.functions import arrays_zip - >>> df = spark.createDataFrame([([1, 2, 3], [2, 4, 6], [3, 6])], ['vals1', 'vals2', 'vals3']) - >>> df = df.select(arrays_zip(df.vals1, df.vals2, df.vals3).alias('zipped')) - >>> df.show(truncate=False) - +------------------------------------+ - |zipped | - +------------------------------------+ - |[{1, 2, 3}, {2, 4, 6}, {3, 6, NULL}]| - +------------------------------------+ - >>> df.printSchema() - root - |-- zipped: array (nullable = true) - | |-- element: struct (containsNull = false) - | | |-- vals1: long (nullable = true) - | | |-- vals2: long (nullable = true) - | | |-- vals3: long (nullable = true) + Example 1: Zipping two arrays of the same length + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([([1, 2, 3], ['a', 'b', 'c'])], ['nums', 'letters']) + >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False) + +-------------------------+ + |arrays_zip(nums, letters)| + +-------------------------+ + |[{1, a}, {2, b}, {3, c}] | + +-------------------------+ + + + Example 2: Zipping arrays of different lengths + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([([1, 2], ['a', 'b', 'c'])], ['nums', 'letters']) + >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False) + +---------------------------+ + |arrays_zip(nums, letters) | + +---------------------------+ + |[{1, a}, {2, b}, {NULL, c}]| + +---------------------------+ + + Example 3: Zipping more than two arrays + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame( + ... 
[([1, 2], ['a', 'b'], [True, False])], ['nums', 'letters', 'bools']) + >>> df.select(sf.arrays_zip(df.nums, df.letters, df.bools)).show(truncate=False) + +--------------------------------+ + |arrays_zip(nums, letters, bools)| + +--------------------------------+ + |[{1, a, true}, {2, b, false}] | + +--------------------------------+ + + Example 4: Zipping arrays with null values + + >>> from pyspark.sql import functions as sf + >>> df = spark.createDataFrame([([1, 2, None], ['a', None, 'c'])], ['nums', 'letters']) + >>> df.select(sf.arrays_zip(df.nums, df.letters)).show(truncate=False) + +------------------------------+ + |arrays_zip(nums, letters) | + +------------------------------+ + |[{1, a}, {2, NULL}, {NULL, c}]| + +------------------------------+ """ return _invoke_function_over_seq_of_columns("arrays_zip", cols) From dde4956b8c1a42b46e9740659867d79a009da002 Mon Sep 17 00:00:00 2001 From: yangjie01 Date: Sun, 31 Dec 2023 01:58:27 +0800 Subject: [PATCH 2/3] check ansi is true --- .github/workflows/build_and_test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 56b34d6c5e7b6..3dc0c2eb72ae2 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -377,6 +377,7 @@ jobs: SKIP_PACKAGING: true METASPACE_SIZE: 1g BRANCH: ${{ inputs.branch }} + SPARK_ANSI_SQL_MODE: true steps: - name: Checkout Spark repository uses: actions/checkout@v4 From 9aa91ddddaaeed4f8e777a59d3bd9b5dd0694957 Mon Sep 17 00:00:00 2001 From: yangjie01 Date: Sun, 31 Dec 2023 13:42:33 +0800 Subject: [PATCH 3/3] check ansi is false --- .github/workflows/build_and_test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 3dc0c2eb72ae2..56b34d6c5e7b6 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -377,7 +377,6 @@ jobs: SKIP_PACKAGING: true METASPACE_SIZE: 1g BRANCH: ${{ inputs.branch }} - SPARK_ANSI_SQL_MODE: true steps: - name: Checkout Spark repository uses: actions/checkout@v4
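
The two workflow commits bracket a temporary CI toggle: [PATCH 2/3] adds SPARK_ANSI_SQL_MODE: true to build_and_test.yml and [PATCH 3/3] removes it again. Per the commit subjects ("check ansi is true" / "check ansi is false"), the intent is to run the test matrix once with ANSI SQL mode enabled and once with the default, confirming that the reworked doctest outputs in builtin.py do not depend on the mode. A rough local approximation is sketched below; it is a minimal sketch rather than part of this change, it assumes pyspark from this source tree is importable, and it sets spark.sql.ansi.enabled on the session directly instead of relying on the CI environment variable.

    # Sketch: run the builtin.py doctests with ANSI SQL mode enabled.
    # Assumes `pyspark` from this source checkout is importable (e.g. via PYTHONPATH).
    import doctest

    from pyspark.sql import SparkSession
    import pyspark.sql.functions.builtin as builtin

    spark = (
        SparkSession.builder
        .master("local[2]")
        .appName("builtin-doctest-ansi-check")
        .config("spark.sql.ansi.enabled", "true")  # flip to "false" for the second pass
        .getOrCreate()
    )

    # Reuse the module's globals and inject `spark`, which the examples expect.
    # ELLIPSIS / NORMALIZE_WHITESPACE keep the comparison as lenient as a normal doctest run.
    results = doctest.testmod(
        builtin,
        extraglobs={"spark": spark},
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    print(f"attempted={results.attempted}, failed={results.failed}")
    spark.stop()

A failure in either pass points at an example whose printed output differs between ANSI and non-ANSI mode, which is what the temporary workflow toggle checks in CI before it is dropped.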