diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 388df7002dc3..c3561099d684 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -351,7 +351,7 @@ private[sql] object PartitioningUtils {
       }
     }
 
-    if (partitionColumns.size == schema.fields.size) {
+    if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
       throw new AnalysisException(s"Cannot use all columns for partition columns")
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
index 3fa3864bc969..a4041d7ebe67 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
@@ -218,8 +218,9 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
         spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path)
       }
       intercept[AnalysisException] {
-        spark.range(10).write.format("orc").mode("overwrite").partitionBy("id").save(path)
+        spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path)
       }
+      spark.emptyDataFrame.write.format("parquet").mode("overwrite").save(path)
     }
   }
 
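For context, a minimal standalone sketch of the behavior this patch changes. Before the `nonEmpty` guard, a zero-column DataFrame with no partition columns tripped the all-columns check because `0 == 0`; with the guard, only a non-empty partition spec that covers every column in the schema is rejected. Everything in this snippet outside the two `write` calls (the wrapper object, the `local[*]` session, and the `/tmp` output paths) is illustrative scaffolding, not part of the patch:

```scala
import org.apache.spark.sql.{AnalysisException, SparkSession}

object PartitionGuardExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical local session just to exercise the two code paths.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("partition-guard-example")
      .getOrCreate()

    // After this patch: an empty DataFrame (no columns, no partition spec)
    // no longer hits "Cannot use all columns for partition columns",
    // because partitionColumns is empty.
    spark.emptyDataFrame.write.format("parquet").mode("overwrite").save("/tmp/empty-df")

    // Still rejected: the single partition column "id" covers the entire
    // schema of range(10), so the guard fires as before.
    try {
      spark.range(10).write.format("parquet").mode("overwrite")
        .partitionBy("id").save("/tmp/all-cols")
    } catch {
      case e: AnalysisException => println(s"Rejected as expected: ${e.getMessage}")
    }

    spark.stop()
  }
}
```

The test change mirrors this: the `orc` case is swapped for `csv` (a built-in format that works without extra setup in this suite), and the new `emptyDataFrame` line asserts that the empty-schema write now succeeds rather than throwing.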