Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion python/pyspark/sql/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -2225,7 +2225,7 @@ def json_tuple(col, *fields):
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
[Row(key=u'1', col1=u'value1', col2=u'value2'), Row(key=u'2', col1=u'value12', col2=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ case class Stack(children: Seq[Expression]) extends Generator {

override def elementSchema: StructType =
StructType(children.tail.take(numFields).zipWithIndex.map {
case (e, index) => StructField(s"col$index", e.dataType)
case (e, index) => StructField(s"col${index + 1}", e.dataType)
})

override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ case class JsonTuple(children: Seq[Expression])
@transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)

override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
case (_, idx) => StructField(s"col${idx + 1}", StringType, nullable = true)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

last question, @chakravarthiT. What does Hive's json_tuple returns for the column names? I think we matched the column names with Hive when we added this long time ago.

})

override def prettyName: String = "json_tuple"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ Invalid number of arguments for function from_json. Expected: one of 2 and 3; Fo
-- !query 18
SELECT json_tuple('{"a" : 1, "b" : 2}', CAST(NULL AS STRING), 'b', CAST(NULL AS STRING), 'a')
-- !query 18 schema
struct<c0:string,c1:string,c2:string,c3:string>
struct<col1:string,col2:string,col3:string,col4:string>
-- !query 18 output
NULL 2 NULL 1

Expand All @@ -179,7 +179,7 @@ struct<>
-- !query 20
SELECT json_tuple(jsonField, 'b', CAST(NULL AS STRING), a) FROM jsonTable
-- !query 20 schema
struct<c0:string,c1:string,c2:string>
struct<col1:string,col2:string,col3:string>
-- !query 20 output
2 NULL 1

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ NULL
-- !query 30
select json_tuple(a, a) from t
-- !query 30 schema
struct<c0:string>
struct<col1:string>
-- !query 30 output
NULL

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,8 @@ class JsonFunctionsSuite extends QueryTest with SharedSQLContext {
val df: DataFrame = tuples.toDF("key", "jstring")
val expr = df
.select(functions.json_tuple($"jstring", "f1", "f2"))
.where($"c0".isNotNull)
.groupBy($"c1")
.where($"col1".isNotNull)
.groupBy($"col2")
.count()

val expected = Row(null, 1) ::
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,13 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)
}

test("SPARK-26879 Standardize One-Based column indexing for stack and json_tuple function") {
val dfstack = sql("SELECT stack(2, 1, 2, 3)")
assert(dfstack.columns(0) == "col1" && dfstack.columns(1) == "col2")
val dfjson_tuple = sql("SELECT json_tuple('{\"a\":1, \"b\":2}', 'a', 'b')")
assert(dfjson_tuple.columns(0) == "col1" && dfjson_tuple.columns(1) == "col2")
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we don't need this test.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the test is not bad to have, this way if we change the behaviour in the future unintentionally something will catch it.


test("support table.star") {
checkAnswer(
sql(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1554,7 +1554,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
// if no alias is provided, fields are named `col1`, `col2`... (one-based, per SPARK-26879; note this diverges from Hive's `c0`, `c1` naming).
checkAnswer(sql(
"""
|SELECT c0, c1
|SELECT col1, col2
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
|LATERAL VIEW json_tuple(json, 'f1', 'f2') jt
""".stripMargin), Row("value1", "12"))
Expand Down