
Commit 7b85c51

null is a valid partition value

Parent: 05887fc

File tree

5 files changed: +26 lines, -8 lines
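
Context for the change: a null partition value is stored on disk under Hive's sentinel directory name (__HIVE_DEFAULT_PARTITION__), and before this commit that sentinel could leak back out as a literal partition value instead of null when partition metadata was turned into rows. A minimal reproduction, mirroring the regression test added below (the table name "test" is the one used there):

    // Write one null partition value, then read the table back.
    // Before this fix the null could resurface as the sentinel string.
    Seq((1, "p", 1), (2, null, 2)).toDF("a", "b", "c")
      .write.partitionBy("b", "c").saveAsTable("test")
    spark.table("test").show()
    // expected rows: (1, "p", 1) and (2, null, 2)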

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala

Lines changed: 7 additions & 2 deletions

@@ -115,7 +115,12 @@ case class CatalogTablePartition(
     val caseInsensitiveProperties = CaseInsensitiveMap(storage.properties)
     val timeZoneId = caseInsensitiveProperties.getOrElse("timeZone", defaultTimeZondId)
     InternalRow.fromSeq(partitionSchema.map { field =>
-      Cast(Literal(spec(field.name)), field.dataType, Option(timeZoneId)).eval()
+      val partValue = if (spec(field.name) == ExternalCatalogUtils.DEFAULT_PARTITION_NAME) {
+        null
+      } else {
+        spec(field.name)
+      }
+      Cast(Literal(partValue), field.dataType, Option(timeZoneId)).eval()
     })
   }
 }

@@ -163,7 +168,7 @@ case class BucketSpec(
  * @param tracksPartitionsInCatalog whether this table's partition metadata is stored in the
  *                                  catalog. If false, it is inferred automatically based on file
  *                                  structure.
- * @param schemaPresevesCase Whether or not the schema resolved for this table is case-sensitive.
+ * @param schemaPreservesCase Whether or not the schema resolved for this table is case-sensitive.
  *                           When using a Hive Metastore, this flag is set to false if a case-
  *                           sensitive schema was unable to be read from the table properties.
  *                           Used to trigger case-sensitive schema inference at query time, when
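
The substantive fix is the first hunk: when a partition column's raw value in the spec equals ExternalCatalogUtils.DEFAULT_PARTITION_NAME (the "__HIVE_DEFAULT_PARTITION__" sentinel), it is now mapped back to null before the Cast. Previously the sentinel only came out as null for non-string columns as a side effect of the failed cast; for a string partition column the cast is a no-op, so the sentinel itself survived as the value. A standalone sketch of the mapping (the helper name is illustrative, not Spark API):

    // Hedged sketch of the per-column sentinel-to-null mapping.
    val DEFAULT_PARTITION_NAME = "__HIVE_DEFAULT_PARTITION__"

    def partitionValue(raw: String): Any =
      if (raw == DEFAULT_PARTITION_NAME) null else raw

    partitionValue("__HIVE_DEFAULT_PARTITION__")  // null
    partitionValue("p")                           // "p"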

sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala

Lines changed: 1 addition & 1 deletion

@@ -319,7 +319,7 @@ case class FileSourceScanExec(
     val input = ctx.freshName("input")
     ctx.addMutableState("scala.collection.Iterator", input, s"$input = inputs[0];")
     val exprRows = output.zipWithIndex.map{ case (a, i) =>
-      new BoundReference(i, a.dataType, a.nullable)
+      BoundReference(i, a.dataType, a.nullable)
     }
     val row = ctx.freshName("row")
     ctx.INPUT_ROW = row
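
An unrelated tidy-up: BoundReference is a case class, so the explicit `new` is redundant and the companion object's apply method builds the same instance. An illustrative example (any case class behaves this way):

    case class Point(x: Int, y: Int)
    val a = new Point(1, 2)  // explicit constructor
    val b = Point(1, 2)      // case-class apply, the idiomatic form
    assert(a == b)           // case classes compare structurally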

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala

Lines changed: 1 addition & 2 deletions

@@ -33,7 +33,6 @@ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
 import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
 import org.apache.spark.sql.types._
-import org.apache.spark.unsafe.types.UTF8String

 // TODO: We should tighten up visibility of the classes here once we clean up Hive coupling.

@@ -129,7 +128,7 @@ object PartitioningUtils {
     //   "hdfs://host:9000/invalidPath"
     //   "hdfs://host:9000/path"
     // TODO: Selective case sensitivity.
-    val discoveredBasePaths = optDiscoveredBasePaths.flatMap(x => x).map(_.toString.toLowerCase())
+    val discoveredBasePaths = optDiscoveredBasePaths.flatten.map(_.toString.toLowerCase())
     assert(
       discoveredBasePaths.distinct.size == 1,
       "Conflicting directory structures detected. Suspicious paths:\b" +

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala

Lines changed: 2 additions & 2 deletions

@@ -1004,8 +1004,8 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat
     val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName)
     val clientPartitionNames =
       client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec))
-    clientPartitionNames.map { partName =>
-      val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partName)
+    clientPartitionNames.map { partitionPath =>
+      val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partitionPath)
       partSpec.map { case (partName, partValue) =>
         partColNameMap(partName.toLowerCase) + "=" + escapePathName(partValue)
       }.mkString("/")
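
The rename from partName to partitionPath also removes a shadowing hazard: the inner pattern `case (partName, partValue)` reused the outer parameter's name, hiding it within the nested closure. A tiny illustration with hypothetical values:

    // The inner binding hides the outer one inside the nested block.
    Seq("b=2/c=3").map { partName =>                // outer: a path fragment
      Seq(("b", "2")).map { case (partName, v) =>   // shadows the outer name
        partName + "=" + v                          // "b=2": inner binding wins
      }
    }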

sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala

Lines changed: 15 additions & 1 deletion

@@ -22,7 +22,7 @@ import java.io.File
 import org.apache.hadoop.fs.Path

 import org.apache.spark.metrics.source.HiveCatalogMetrics
-import org.apache.spark.sql.{AnalysisException, QueryTest}
+import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.SQLConf

@@ -316,6 +316,20 @@ class PartitionProviderCompatibilitySuite
         }
       }
     }
+
+    test(s"SPARK-19887 partition value is null - partition management $enabled") {
+      withTable("test") {
+        Seq((1, "p", 1), (2, null, 2)).toDF("a", "b", "c")
+          .write.partitionBy("b", "c").saveAsTable("test")
+        checkAnswer(spark.table("test"),
+          Row(1, "p", 1) :: Row(2, null, 2) :: Nil)
+
+        Seq((3, null: String, 3)).toDF("a", "b", "c")
+          .write.mode("append").partitionBy("b", "c").saveAsTable("test")
+        checkAnswer(spark.table("test"),
+          Row(1, "p", 1) :: Row(2, null, 2) :: Row(3, null, 3) :: Nil)
+      }
+    }
   }

   /**
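
To run just this suite locally, one common invocation is shown below (the task name is testOnly on newer sbt; older sbt 0.13 also accepts test-only):

    build/sbt "hive/testOnly *PartitionProviderCompatibilitySuite"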
