python/pyspark/sql/readwriter.py (4 additions, 1 deletion)
@@ -166,10 +166,13 @@ def json(self, path, schema=None):
during parsing.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record and puts the malformed string into a new field configured by \
- ``spark.sql.columnNameOfCorruptRecord``. When a schema is set by user, it sets \
+ ``columnNameOfCorruptRecord``. When a schema is set by user, it sets \
``null`` for extra fields.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
+ * ``columnNameOfCorruptRecord`` (default ``_corrupt_record``): allows renaming the \
+   new field that holds the malformed string created by ``PERMISSIVE`` mode. \
+   This overrides ``spark.sql.columnNameOfCorruptRecord``.
>>> df1 = sqlContext.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
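For reference, a hypothetical end-to-end use of the new option (not part of the patch; the path and data are illustrative, and shown in Scala to match the rest of the change):

    // Rename the corrupt-record column for a single permissive read.
    val people = sqlContext.read
      .option("mode", "PERMISSIVE")                      // default mode, shown for clarity
      .option("columnNameOfCorruptRecord", "_malformed") // instead of "_corrupt_record"
      .json("data/people.json")                          // hypothetical input path
    // If any input lines are malformed, they surface under "_malformed".
    people.printSchema()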
sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala (16 additions, 4 deletions)
@@ -293,11 +293,14 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* during parsing.<li>
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts the
- * malformed string into a new field configured by `spark.sql.columnNameOfCorruptRecord`. When
+ * malformed string into a new field configured by `columnNameOfCorruptRecord`. When
* a schema is set by user, it sets `null` for extra fields.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
+ * <li>`columnNameOfCorruptRecord` (default `_corrupt_record`): allows renaming the new field
+ *   that holds the malformed string created by `PERMISSIVE` mode. This overrides
+ *   `spark.sql.columnNameOfCorruptRecord`.</li>
*
* @since 1.4.0
*/
@@ -326,11 +329,14 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* during parsing.<li>
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts the
- * malformed string into a new field configured by `spark.sql.columnNameOfCorruptRecord`. When
+ * malformed string into a new field configured by `columnNameOfCorruptRecord`. When
* a schema is set by user, it sets `null` for extra fields.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
+ * <li>`columnNameOfCorruptRecord` (default `_corrupt_record`): allows renaming the new field
+ *   that holds the malformed string created by `PERMISSIVE` mode. This overrides
+ *   `spark.sql.columnNameOfCorruptRecord`.</li>
*
* @since 1.6.0
*/
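The precedence documented above (per-read option over the session-wide conf) can be sketched as follows; the conf value, column names, and path are illustrative, not from the patch:

    // Session-wide default name for the corrupt-record column.
    sqlContext.setConf("spark.sql.columnNameOfCorruptRecord", "_bad_record")

    // A per-read option still wins, for this read only.
    val df = sqlContext.read
      .option("columnNameOfCorruptRecord", "_malformed")
      .json("data/events.json")  // hypothetical path
    // df uses "_malformed"; other reads keep using "_bad_record".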
@@ -360,8 +366,14 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
*/
def json(jsonRDD: RDD[String]): DataFrame = {
val parsedOptions: JSONOptions = new JSONOptions(extraOptions.toMap)
+   val columnNameOfCorruptRecord =
+     parsedOptions.columnNameOfCorruptRecord
+       .getOrElse(sqlContext.conf.columnNameOfCorruptRecord)
val schema = userSpecifiedSchema.getOrElse {
-     InferSchema.infer(jsonRDD, sqlContext.conf.columnNameOfCorruptRecord, parsedOptions)
+     InferSchema.infer(
+       jsonRDD,
+       columnNameOfCorruptRecord,
+       parsedOptions)
}

Dataset.newDataFrame(
@@ -371,7 +383,7 @@
JacksonParser.parse(
jsonRDD,
schema,
-       sqlContext.conf.columnNameOfCorruptRecord,
+       columnNameOfCorruptRecord,
parsedOptions))(sqlContext))
}
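A minimal, self-contained sketch of the RDD[String] overload this method implements, with an inline malformed record (illustrative, not part of the patch):

    // One well-formed and one malformed JSON line.
    val lines = sqlContext.sparkContext.parallelize(Seq(
      """{"a": 1}""",
      """{"a": """))  // truncated, so it is a corrupt record

    val df = sqlContext.read
      .option("columnNameOfCorruptRecord", "_malformed")
      .json(lines)
    df.show()  // the truncated line appears under "_malformed"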

JSONOptions.scala (1 addition)
@@ -51,6 +51,7 @@ private[sql] class JSONOptions(
parameters.get("allowBackslashEscapingAnyCharacter").map(_.toBoolean).getOrElse(false)
val compressionCodec = parameters.get("compression").map(CompressionCodecs.getCodecClassName)
private val parseMode = parameters.getOrElse("mode", "PERMISSIVE")
+   val columnNameOfCorruptRecord = parameters.get("columnNameOfCorruptRecord")

// Parse mode flags
if (!ParseModes.isValidMode(parseMode)) {
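Because the new field is a plain Option[String], call sites can distinguish "the user set the option" from "fall back to the session conf". A small sketch, assuming JSONOptions stays constructible from a Map[String, String] as the call sites in this diff suggest:

    val opts = new JSONOptions(Map("columnNameOfCorruptRecord" -> "_malformed"))
    assert(opts.columnNameOfCorruptRecord == Some("_malformed"))

    // With no option set, the caller is expected to fall back to the conf.
    assert(new JSONOptions(Map.empty).columnNameOfCorruptRecord.isEmpty)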
JSONRelation.scala (6 additions, 2 deletions)
@@ -51,14 +51,17 @@ class DefaultSource extends FileFormat with DataSourceRegister {
None
} else {
val parsedOptions: JSONOptions = new JSONOptions(options)
+     val columnNameOfCorruptRecord =
+       parsedOptions.columnNameOfCorruptRecord
+         .getOrElse(sqlContext.conf.columnNameOfCorruptRecord)
val jsonFiles = files.filterNot { status =>
val name = status.getPath.getName
name.startsWith("_") || name.startsWith(".")
}.toArray

val jsonSchema = InferSchema.infer(
createBaseRdd(sqlContext, jsonFiles),
-       sqlContext.conf.columnNameOfCorruptRecord,
+       columnNameOfCorruptRecord,
parsedOptions)
checkConstraints(jsonSchema)

@@ -102,10 +105,13 @@ class DefaultSource extends FileFormat with DataSourceRegister {

val parsedOptions: JSONOptions = new JSONOptions(options)
val requiredDataSchema = StructType(requiredColumns.map(dataSchema(_)))
+     val columnNameOfCorruptRecord =
+       parsedOptions.columnNameOfCorruptRecord
+         .getOrElse(sqlContext.conf.columnNameOfCorruptRecord)
val rows = JacksonParser.parse(
createBaseRdd(sqlContext, jsonFiles),
requiredDataSchema,
-       sqlContext.conf.columnNameOfCorruptRecord,
+       columnNameOfCorruptRecord,
parsedOptions)

rows.mapPartitions { iterator =>
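The same three-line option/conf resolution now appears in DataFrameReader.json and twice in this file. A possible follow-up (hypothetical helper, not in this patch) would centralize it:

    // Hypothetical consolidation of the repeated resolution logic;
    // SQLConf here is Spark's internal session configuration class.
    private def resolveCorruptRecordColumn(
        parsedOptions: JSONOptions,
        conf: SQLConf): String = {
      parsedOptions.columnNameOfCorruptRecord
        .getOrElse(conf.columnNameOfCorruptRecord)
    }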
JsonSuite.scala (21 additions)
@@ -1067,6 +1067,27 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
}
}

test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = sqlContext.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)

assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
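As a follow-up note: the renamed column only comes into play in PERMISSIVE mode. A sketch (hypothetical, reusing the suite's corruptRecords fixture) of the same data under the other two modes:

    // DROPMALFORMED: corrupt rows are dropped; no corrupt-record column is created.
    val dropped = sqlContext.read
      .option("mode", "DROPMALFORMED")
      .json(corruptRecords)
    dropped.show()  // only the well-formed record should remain

    // FAILFAST: the first corrupt record aborts the read.
    intercept[Exception] {
      sqlContext.read.option("mode", "FAILFAST").json(corruptRecords).collect()
    }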

test("SPARK-4068: nulls in arrays") {
val jsonDF = sqlContext.read.json(nullsInArrays)
jsonDF.registerTempTable("jsonTable")