diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala index e8bbc6b22a81..7ef90e73b1c5 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.parser import java.util.Locale import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, GlobalTempView, LocalTempView, PersistedView, UnresolvedAttribute, UnresolvedFunc, UnresolvedNamespace, UnresolvedPartitionSpec, UnresolvedRelation, UnresolvedStar, UnresolvedTable, UnresolvedTableOrView, UnresolvedView} +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, GlobalTempView, LocalTempView, PersistedView, UnresolvedAttribute, UnresolvedFunc, UnresolvedNamespace, UnresolvedRelation, UnresolvedStar, UnresolvedTable, UnresolvedTableOrView, UnresolvedView} import org.apache.spark.sql.catalyst.catalog.{ArchiveResource, BucketSpec, FileResource, FunctionResource, JarResource} import org.apache.spark.sql.catalyst.expressions.{EqualTo, Literal} import org.apache.spark.sql.catalyst.plans.logical._ @@ -2114,54 +2114,6 @@ class DDLParserSuite extends AnalysisTest { comparePlans(parsed2, expected2) } - // ALTER TABLE table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] - // ALTER VIEW table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] - test("alter table: drop partition") { - val sql1_table = - """ - |ALTER TABLE table_name DROP IF EXISTS PARTITION - |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk') - """.stripMargin - val sql2_table = - """ - |ALTER TABLE table_name DROP PARTITION - |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk') - """.stripMargin - val sql1_view = sql1_table.replace("TABLE", "VIEW") - val sql2_view = sql2_table.replace("TABLE", "VIEW") - - val parsed1_table = parsePlan(sql1_table) - val parsed2_table = parsePlan(sql2_table) - val parsed1_purge = parsePlan(sql1_table + " PURGE") - - assertUnsupported(sql1_view) - assertUnsupported(sql2_view) - - val expected1_table = AlterTableDropPartition( - UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), - Seq( - UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), - UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"))), - ifExists = true, - purge = false) - val expected2_table = expected1_table.copy(ifExists = false) - val expected1_purge = expected1_table.copy(purge = true) - - comparePlans(parsed1_table, expected1_table) - comparePlans(parsed2_table, expected2_table) - comparePlans(parsed1_purge, expected1_purge) - - val sql3_table = "ALTER TABLE a.b.c DROP IF EXISTS PARTITION (ds='2017-06-10')" - val expected3_table = AlterTableDropPartition( - UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... 
DROP PARTITION ..."), - Seq(UnresolvedPartitionSpec(Map("ds" -> "2017-06-10"))), - ifExists = true, - purge = false) - - val parsed3_table = parsePlan(sql3_table) - comparePlans(parsed3_table, expected3_table) - } - test("show current namespace") { comparePlans( parsePlan("SHOW CURRENT NAMESPACE"), diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala index 570976965ec7..7beb34c06323 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala @@ -18,18 +18,8 @@ package org.apache.spark.sql.connector import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.InternalRow -import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionsException -import org.apache.spark.sql.connector.catalog.{CatalogV2Implicits, Identifier} -import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits -import org.apache.spark.sql.internal.SQLConf class AlterTablePartitionV2SQLSuite extends DatasourceV2SQLBase { - - import CatalogV2Implicits._ - import DataSourceV2Implicits._ - - test("ALTER TABLE RECOVER PARTITIONS") { val t = "testcat.ns1.ns2.tbl" withTable(t) { @@ -51,106 +41,4 @@ class AlterTablePartitionV2SQLSuite extends DatasourceV2SQLBase { assert(e.message.contains("ALTER TABLE RENAME PARTITION is only supported with v1 tables")) } } - - test("ALTER TABLE DROP PARTITION") { - val t = "testpart.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") - spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1)") - - val partTable = - catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")) - assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1)))) - } - } - - test("ALTER TABLE DROP PARTITIONS") { - val t = "testpart.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - spark.sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (id=1) LOCATION 'loc'" + - " PARTITION (id=2) LOCATION 'loc1'") - spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)") - - val partTable = - catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")) - assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1)))) - assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(2)))) - assert( - partTable.asPartitionable.listPartitionIdentifiers(Array.empty, InternalRow.empty).isEmpty) - } - } - - test("ALTER TABLE DROP PARTITIONS: partition not exists") { - val t = "testpart.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") - - assertThrows[NoSuchPartitionsException]( - spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)")) - - val partTable = - catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")) - assert(partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1)))) - - spark.sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)") - 
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1)))) - assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(2)))) - assert( - partTable.asPartitionable.listPartitionIdentifiers(Array.empty, InternalRow.empty).isEmpty) - } - } - - test("case sensitivity in resolving partition specs") { - val t = "testpart.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - val errMsg = intercept[AnalysisException] { - spark.sql(s"ALTER TABLE $t DROP PARTITION (ID=1)") - }.getMessage - assert(errMsg.contains(s"ID is not a valid partition column in table $t")) - } - - val partTable = catalog("testpart").asTableCatalog - .loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")) - .asPartitionable - assert(!partTable.partitionExists(InternalRow.fromSeq(Seq(1)))) - - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - spark.sql(s"ALTER TABLE $t ADD PARTITION (ID=1) LOCATION 'loc1'") - assert(partTable.partitionExists(InternalRow.fromSeq(Seq(1)))) - spark.sql(s"ALTER TABLE $t DROP PARTITION (Id=1)") - assert(!partTable.partitionExists(InternalRow.fromSeq(Seq(1)))) - } - } - } - - test("SPARK-33650: drop partition into a table which doesn't support partition management") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING _") - val errMsg = intercept[AnalysisException] { - spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1)") - }.getMessage - assert(errMsg.contains(s"Table $t can not alter partitions")) - } - } - - test("SPARK-33676: not fully specified partition spec") { - val t = "testpart.ns1.ns2.tbl" - withTable(t) { - sql(s""" - |CREATE TABLE $t (id bigint, part0 int, part1 string) - |USING foo - |PARTITIONED BY (part0, part1)""".stripMargin) - val errMsg = intercept[AnalysisException] { - sql(s"ALTER TABLE $t DROP PARTITION (part0 = 1)") - }.getMessage - assert(errMsg.contains("Partition spec is invalid. " + - "The spec (part0) must match the partition spec (part0, part1)")) - } - } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala new file mode 100644 index 000000000000..53edd5854f28 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedPartitionSpec, UnresolvedTable} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.catalyst.plans.logical.AlterTableDropPartition +import org.apache.spark.sql.test.SharedSparkSession + +class AlterTableDropPartitionParserSuite extends AnalysisTest with SharedSparkSession { + test("drop partition") { + val sql = """ + |ALTER TABLE table_name DROP PARTITION + |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk') + """.stripMargin + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), + Seq( + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), + UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"))), + ifExists = false, + purge = false) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition if exists") { + val sql = """ + |ALTER TABLE table_name DROP IF EXISTS + |PARTITION (dt='2008-08-08', country='us'), + |PARTITION (dt='2009-09-09', country='uk') + """.stripMargin + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), + Seq( + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), + UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"))), + ifExists = true, + purge = false) + comparePlans(parsePlan(sql), expected) + } + + test("drop partition in a table with multi-part identifier") { + val sql = "ALTER TABLE a.b.c DROP IF EXISTS PARTITION (ds='2017-06-10')" + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... DROP PARTITION ..."), + Seq(UnresolvedPartitionSpec(Map("ds" -> "2017-06-10"))), + ifExists = true, + purge = false) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition with PURGE") { + val sql = "ALTER TABLE table_name DROP PARTITION (p=1) PURGE" + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), + Seq(UnresolvedPartitionSpec(Map("p" -> "1"))), + ifExists = false, + purge = true) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition from view") { + val sql = "ALTER VIEW table_name DROP PARTITION (p=1)" + val errMsg = intercept[ParseException] { + parsePlan(sql) + }.getMessage + assert(errMsg.contains("Operation not allowed")) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala new file mode 100644 index 000000000000..ed479e2824fb --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.scalactic.source.Position +import org.scalatest.Tag + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.execution.datasources.PartitioningUtils +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.test.SQLTestUtils + +trait AlterTableDropPartitionSuiteBase extends QueryTest with SQLTestUtils { + protected def version: String + protected def catalog: String + protected def defaultUsing: String + + protected def notFullPartitionSpecErr: String + + override def test(testName: String, testTags: Tag*)(testFun: => Any) + (implicit pos: Position): Unit = { + super.test(s"ALTER TABLE .. DROP PARTITION $version: " + testName, testTags: _*)(testFun) + } + + protected def withNsTable(ns: String, tableName: String, cat: String = catalog) + (f: String => Unit): Unit = { + val nsCat = s"$cat.$ns" + withNamespace(nsCat) { + sql(s"CREATE NAMESPACE $nsCat") + val t = s"$nsCat.$tableName" + withTable(t) { + f(t) + } + } + } + + protected def checkPartitions(t: String, expected: Map[String, String]*): Unit = { + val partitions = sql(s"SHOW PARTITIONS $t") + .collect() + .toSet + .map((row: Row) => row.getString(0)) + .map(PartitioningUtils.parsePathFragment) + assert(partitions === expected.toSet) + } + + protected def checkDropPartition( + t: String, + ifExists: String, + specs: Map[String, Any]*): Unit = { + checkPartitions(t, specs.map(_.mapValues(_.toString).toMap): _*) + val specStr = specs.map( + _.map { + case (k, v: String) => s"$k = '$v'" + case (k, v) => s"$k = $v" + }.mkString("PARTITION (", ", ", ")")) + .mkString(", ") + sql(s"ALTER TABLE $t DROP $ifExists $specStr") + checkPartitions(t) + } + + test("single partition") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + checkDropPartition(t, ifExists, Map("id" -> 1)) + } + } + } + + test("multiple partitions") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s""" + |ALTER TABLE $t ADD + |PARTITION (id=1) LOCATION 'loc' + |PARTITION (id=2) LOCATION 'loc1'""".stripMargin) + checkDropPartition(t, ifExists, Map("id" -> 1), Map("id" -> 2)) + } + } + } + + test("multi-part partition") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, a int, b string) $defaultUsing PARTITIONED BY (a, b)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (a = 2, b = 'abc')") + checkDropPartition(t, ifExists, Map("a" -> 2, "b" -> "abc")) + } + } + } + + test("table to alter does not exist") { + withNsTable("ns", "does_not_exist") { t => + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (a='4', b='9')") + }.getMessage + assert(errMsg.contains("Table not found")) + } + } + + test("case sensitivity in resolving partition specs") { + 
withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (ID=1)") + }.getMessage + assert(errMsg.contains("ID is not a valid partition column")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (ID=1) LOCATION 'loc1'") + checkDropPartition(t, ifExists, Map("id" -> 1)) + } + } + } + } + + test("SPARK-33676: not fully specified partition spec") { + withNsTable("ns", "tbl") { t => + sql(s""" + |CREATE TABLE $t (id bigint, part0 int, part1 string) + |$defaultUsing + |PARTITIONED BY (part0, part1)""".stripMargin) + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (part0 = 1)") + }.getMessage + assert(errMsg.contains(notFullPartitionSpecErr)) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index 05e0f4f4a538..d6474ae7d5f0 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -334,10 +334,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { testChangeColumn(isDatasourceTable = true) } - test("alter table: drop partition (datasource table)") { - testDropPartitions(isDatasourceTable = true) - } - test("alter table: rename partition (datasource table)") { testRenamePartitions(isDatasourceTable = true) } @@ -1617,59 +1613,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { } } - protected def testDropPartitions(isDatasourceTable: Boolean): Unit = { - if (!isUsingHiveMetastore) { - assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") - } - val catalog = spark.sessionState.catalog - val tableIdent = TableIdentifier("tab1", Some("dbx")) - val part1 = Map("a" -> "1", "b" -> "5") - val part2 = Map("a" -> "2", "b" -> "6") - val part3 = Map("a" -> "3", "b" -> "7") - val part4 = Map("a" -> "4", "b" -> "8") - val part5 = Map("a" -> "9", "b" -> "9") - createDatabase(catalog, "dbx") - createTable(catalog, tableIdent, isDatasourceTable) - createTablePartition(catalog, part1, tableIdent) - createTablePartition(catalog, part2, tableIdent) - createTablePartition(catalog, part3, tableIdent) - createTablePartition(catalog, part4, tableIdent) - createTablePartition(catalog, part5, tableIdent) - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(part1, part2, part3, part4, part5)) - - // basic drop partition - sql("ALTER TABLE dbx.tab1 DROP IF EXISTS PARTITION (a='4', b='8'), PARTITION (a='3', b='7')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part5)) - - // drop partitions without explicitly specifying database - catalog.setCurrentDatabase("dbx") - sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='2', b ='6')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5)) - - // table to alter does not exist - intercept[AnalysisException] { - sql("ALTER TABLE does_not_exist DROP IF EXISTS PARTITION (a='2')") - } - - // partition to drop does not exist - intercept[AnalysisException] { - sql("ALTER TABLE tab1 DROP PARTITION (a='300')") - } - - // partition to drop does not exist when using IF EXISTS - sql("ALTER 
TABLE tab1 DROP IF EXISTS PARTITION (a='300')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5)) - - // partition spec in DROP PARTITION should be case insensitive by default - sql("ALTER TABLE tab1 DROP PARTITION (A='1', B='5')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part5)) - - // use int literal as partition value for int type partition column - sql("ALTER TABLE tab1 DROP PARTITION (a=9, b=9)") - assert(catalog.listPartitions(tableIdent).isEmpty) - } - protected def testRenamePartitions(isDatasourceTable: Boolean): Unit = { if (!isUsingHiveMetastore) { assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala new file mode 100644 index 000000000000..5ad182bc689b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionsException +import org.apache.spark.sql.connector.catalog.CatalogManager +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.test.SharedSparkSession + +trait AlterTableDropPartitionSuiteBase extends command.AlterTableDropPartitionSuiteBase { + override def version: String = "V1" + override def catalog: String = CatalogManager.SESSION_CATALOG_NAME + override def defaultUsing: String = "USING parquet" + + override protected val notFullPartitionSpecErr = "The following partitions not found in table" +} + +class AlterTableDropPartitionSuite + extends AlterTableDropPartitionSuiteBase + with SharedSparkSession { + + test("partition not exists") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + + val errMsg = intercept[NoSuchPartitionsException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)") + }.getMessage + assert(errMsg.contains("partitions not found in table")) + + checkPartitions(t, Map("id" -> "1")) + sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)") + checkPartitions(t) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala new file mode 100644 index 000000000000..608e7d7c98f6 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.SparkConf +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionsException +import org.apache.spark.sql.connector.{InMemoryPartitionTableCatalog, InMemoryTableCatalog} +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.test.SharedSparkSession + +class AlterTableDropPartitionSuite + extends command.AlterTableDropPartitionSuiteBase + with SharedSparkSession { + + override def version: String = "V2" + override def catalog: String = "test_catalog" + override def defaultUsing: String = "USING _" + + override protected val notFullPartitionSpecErr = "Partition spec is invalid" + + override def sparkConf: SparkConf = super.sparkConf + .set(s"spark.sql.catalog.$catalog", classOf[InMemoryPartitionTableCatalog].getName) + .set(s"spark.sql.catalog.non_part_$catalog", classOf[InMemoryTableCatalog].getName) + + test("partition not exists") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + + val errMsg = intercept[NoSuchPartitionsException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)") + }.getMessage + assert(errMsg.contains("partitions not found in table")) + + checkPartitions(t, Map("id" -> "1")) + sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)") + checkPartitions(t) + } + } + + test("SPARK-33650: drop partition into a table which doesn't support partition management") { + withNsTable("ns", "tbl", s"non_part_$catalog") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1)") + }.getMessage + assert(errMsg.contains("can not alter partitions")) + } + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index b686d040b964..9bb9934e0936 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -163,10 +163,6 @@ class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeA testRenamePartitions(isDatasourceTable = false) } - test("alter table: drop partition") { - testDropPartitions(isDatasourceTable = false) - } - test("drop table") { testDropTable(isDatasourceTable = false) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala new file mode 100644 index 000000000000..fe26466cdad6 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command.v1 +import org.apache.spark.sql.hive.test.TestHiveSingleton + +class AlterTableDropPartitionSuite + extends v1.AlterTableDropPartitionSuiteBase + with TestHiveSingleton { + + override def version: String = "Hive V1" + override def defaultUsing: String = "USING HIVE" + + override protected val notFullPartitionSpecErr = "No partition is dropped" + + test("partition not exists") { + withNsTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)") + }.getMessage + assert(errMsg.contains("No partition is dropped")) + + checkPartitions(t, Map("id" -> "1")) + sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)") + checkPartitions(t) + } + } +}
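
The refactoring above converges all ALTER TABLE .. DROP PARTITION coverage on one shared trait: `AlterTableDropPartitionSuiteBase` holds the checks that must pass for every catalog, while each flavor (v1 session catalog, v2 in-memory catalog, Hive) supplies only `version`, `catalog`, `defaultUsing`, its expected `notFullPartitionSpecErr` message, and any catalog-specific tests. As a minimal sketch of how one more implementation could plug into the same hierarchy — the suite name and the catalog name `custom_cat` below are hypothetical and not part of this patch; the catalog is simply backed by the existing `InMemoryPartitionTableCatalog` test class that the v2 suite above also uses:

    package org.apache.spark.sql.execution.command.custom

    import org.apache.spark.SparkConf
    import org.apache.spark.sql.connector.InMemoryPartitionTableCatalog
    import org.apache.spark.sql.execution.command
    import org.apache.spark.sql.test.SharedSparkSession

    // Hypothetical suite: inherits every shared test from
    // AlterTableDropPartitionSuiteBase and only wires up its own catalog.
    class CustomCatalogAlterTableDropPartitionSuite
      extends command.AlterTableDropPartitionSuiteBase
      with SharedSparkSession {

      override def version: String = "Custom"        // prefix for generated test names
      override def catalog: String = "custom_cat"    // hypothetical catalog name
      override def defaultUsing: String = "USING _"  // USING clause for CREATE TABLE

      // Error fragment reported for a partial partition spec; the in-memory
      // v2 implementation uses the same message asserted in the v2 suite above.
      override protected val notFullPartitionSpecErr = "Partition spec is invalid"

      // Register the hypothetical catalog, backed by the in-memory
      // partitioned test catalog.
      override def sparkConf: SparkConf = super.sparkConf
        .set(s"spark.sql.catalog.$catalog", classOf[InMemoryPartitionTableCatalog].getName)
    }

Because the base trait overrides `test` to prefix every name with "ALTER TABLE .. DROP PARTITION $version:", the shared tests and the catalog-specific ones stay distinguishable in a combined test report.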