@@ -148,7 +148,7 @@ hiveNativeCommands
| ROLLBACK WORK?
| SHOW PARTITIONS tableIdentifier partitionSpec?
| DFS .*?
-| (CREATE | ALTER | DROP | SHOW | DESC | DESCRIBE | MSCK | LOAD) .*?
+| (CREATE | ALTER | DROP | SHOW | DESC | DESCRIBE | LOAD) .*?
;

unsupportedHiveNativeCommands
@@ -177,6 +177,7 @@ unsupportedHiveNativeCommands
| kw1=UNLOCK kw2=DATABASE
| kw1=CREATE kw2=TEMPORARY kw3=MACRO
| kw1=DROP kw2=TEMPORARY kw3=MACRO
+| kw1=MSCK kw2=REPAIR kw3=TABLE
;

createTableHeader
@@ -651,7 +652,7 @@ nonReserved
| AFTER | CASCADE | RESTRICT | BUCKETS | CLUSTERED | SORTED | PURGE | INPUTFORMAT | OUTPUTFORMAT
| INPUTDRIVER | OUTPUTDRIVER | DBPROPERTIES | DFS | TRUNCATE | METADATA | REPLICATION | COMPUTE
| STATISTICS | ANALYZE | PARTITIONED | EXTERNAL | DEFINED | RECORDWRITER
-| REVOKE | GRANT | LOCK | UNLOCK | MSCK | EXPORT | IMPORT | LOAD | VALUES | COMMENT | ROLE
+| REVOKE | GRANT | LOCK | UNLOCK | MSCK | REPAIR | EXPORT | IMPORT | LOAD | VALUES | COMMENT | ROLE
| ROLES | COMPACTIONS | PRINCIPALS | TRANSACTIONS | INDEX | INDEXES | LOCKS | OPTION
;

@@ -867,6 +868,7 @@ GRANT: 'GRANT';
LOCK: 'LOCK';
UNLOCK: 'UNLOCK';
MSCK: 'MSCK';
+REPAIR: 'REPAIR';
EXPORT: 'EXPORT';
IMPORT: 'IMPORT';
LOAD: 'LOAD';
@@ -281,31 +281,37 @@ abstract class CatalogTestCases extends SparkFunSuite with BeforeAndAfterEach {
test("drop partitions") {
val catalog = newBasicCatalog()
assert(catalogPartitionsEqual(catalog, "db2", "tbl2", Seq(part1, part2)))
catalog.dropPartitions("db2", "tbl2", Seq(part1.spec), ignoreIfNotExists = false)
catalog.dropPartitions(
"db2", "tbl2", Seq(part1.spec), ignoreIfNotExists = false)
assert(catalogPartitionsEqual(catalog, "db2", "tbl2", Seq(part2)))
resetState()
val catalog2 = newBasicCatalog()
assert(catalogPartitionsEqual(catalog2, "db2", "tbl2", Seq(part1, part2)))
catalog2.dropPartitions("db2", "tbl2", Seq(part1.spec, part2.spec), ignoreIfNotExists = false)
catalog2.dropPartitions(
"db2", "tbl2", Seq(part1.spec, part2.spec), ignoreIfNotExists = false)
assert(catalog2.listPartitions("db2", "tbl2").isEmpty)
}

test("drop partitions when database/table does not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.dropPartitions("does_not_exist", "tbl1", Seq(), ignoreIfNotExists = false)
catalog.dropPartitions(
"does_not_exist", "tbl1", Seq(), ignoreIfNotExists = false)
}
intercept[AnalysisException] {
catalog.dropPartitions("db2", "does_not_exist", Seq(), ignoreIfNotExists = false)
catalog.dropPartitions(
"db2", "does_not_exist", Seq(), ignoreIfNotExists = false)
}
}

test("drop partitions that do not exist") {
val catalog = newBasicCatalog()
intercept[AnalysisException] {
catalog.dropPartitions("db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = false)
catalog.dropPartitions(
"db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = false)
}
catalog.dropPartitions("db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = true)
catalog.dropPartitions(
"db2", "tbl2", Seq(part3.spec), ignoreIfNotExists = true)
}

test("get partition") {
@@ -496,42 +496,56 @@ class SessionCatalogSuite extends SparkFunSuite {
val sessionCatalog = new SessionCatalog(externalCatalog)
assert(catalogPartitionsEqual(externalCatalog, "db2", "tbl2", Seq(part1, part2)))
sessionCatalog.dropPartitions(
TableIdentifier("tbl2", Some("db2")), Seq(part1.spec), ignoreIfNotExists = false)
TableIdentifier("tbl2", Some("db2")),
Seq(part1.spec),
ignoreIfNotExists = false)
assert(catalogPartitionsEqual(externalCatalog, "db2", "tbl2", Seq(part2)))
// Drop partitions without explicitly specifying database
sessionCatalog.setCurrentDatabase("db2")
sessionCatalog.dropPartitions(
TableIdentifier("tbl2"), Seq(part2.spec), ignoreIfNotExists = false)
TableIdentifier("tbl2"),
Seq(part2.spec),
ignoreIfNotExists = false)
assert(externalCatalog.listPartitions("db2", "tbl2").isEmpty)
// Drop multiple partitions at once
sessionCatalog.createPartitions(
TableIdentifier("tbl2", Some("db2")), Seq(part1, part2), ignoreIfExists = false)
assert(catalogPartitionsEqual(externalCatalog, "db2", "tbl2", Seq(part1, part2)))
sessionCatalog.dropPartitions(
TableIdentifier("tbl2", Some("db2")), Seq(part1.spec, part2.spec), ignoreIfNotExists = false)
TableIdentifier("tbl2", Some("db2")),
Seq(part1.spec, part2.spec),
ignoreIfNotExists = false)
assert(externalCatalog.listPartitions("db2", "tbl2").isEmpty)
}

test("drop partitions when database/table does not exist") {
val catalog = new SessionCatalog(newBasicCatalog())
intercept[AnalysisException] {
catalog.dropPartitions(
TableIdentifier("tbl1", Some("does_not_exist")), Seq(), ignoreIfNotExists = false)
TableIdentifier("tbl1", Some("does_not_exist")),
Seq(),
ignoreIfNotExists = false)
}
intercept[AnalysisException] {
catalog.dropPartitions(
TableIdentifier("does_not_exist", Some("db2")), Seq(), ignoreIfNotExists = false)
TableIdentifier("does_not_exist", Some("db2")),
Seq(),
ignoreIfNotExists = false)
}
}

test("drop partitions that do not exist") {
val catalog = new SessionCatalog(newBasicCatalog())
intercept[AnalysisException] {
catalog.dropPartitions(
TableIdentifier("tbl2", Some("db2")), Seq(part3.spec), ignoreIfNotExists = false)
TableIdentifier("tbl2", Some("db2")),
Seq(part3.spec),
ignoreIfNotExists = false)
}
catalog.dropPartitions(
TableIdentifier("tbl2", Some("db2")), Seq(part3.spec), ignoreIfNotExists = true)
TableIdentifier("tbl2", Some("db2")),
Seq(part3.spec),
ignoreIfNotExists = true)
}

test("get partition") {
@@ -493,7 +493,9 @@ class SparkSqlAstBuilder extends AstBuilder {
*/
override def visitAddTablePartition(
ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
-if (ctx.VIEW != null) throw new ParseException(s"Operation not allowed: partitioned views", ctx)
+if (ctx.VIEW != null) {
+throw new AnalysisException(s"Operation not allowed: partitioned views")
+}
// Create partition spec to location mapping.
val specsAndLocs = if (ctx.partitionSpec.isEmpty) {
ctx.partitionSpecLocation.asScala.map {
@@ -509,8 +511,7 @@
AlterTableAddPartition(
visitTableIdentifier(ctx.tableIdentifier),
specsAndLocs,
-ctx.EXISTS != null)(
-command(ctx))
+ctx.EXISTS != null)
}

/**
@@ -523,11 +524,8 @@
*/
override def visitExchangeTablePartition(
ctx: ExchangeTablePartitionContext): LogicalPlan = withOrigin(ctx) {
-AlterTableExchangePartition(
-visitTableIdentifier(ctx.from),
-visitTableIdentifier(ctx.to),
-visitNonOptionalPartitionSpec(ctx.partitionSpec))(
-command(ctx))
+throw new AnalysisException(
+"Operation not allowed: ALTER TABLE ... EXCHANGE PARTITION ...")
}

/**
@@ -543,8 +541,7 @@
AlterTableRenamePartition(
visitTableIdentifier(ctx.tableIdentifier),
visitNonOptionalPartitionSpec(ctx.from),
-visitNonOptionalPartitionSpec(ctx.to))(
-command(ctx))
+visitNonOptionalPartitionSpec(ctx.to))
}

/**
@@ -561,13 +558,16 @@
*/
override def visitDropTablePartitions(
ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
-if (ctx.VIEW != null) throw new ParseException(s"Operation not allowed: partitioned views", ctx)
+if (ctx.VIEW != null) {
+throw new AnalysisException(s"Operation not allowed: partitioned views")
+}
+if (ctx.PURGE != null) {
+throw new AnalysisException(s"Operation not allowed: PURGE")
+}
AlterTableDropPartition(
visitTableIdentifier(ctx.tableIdentifier),
ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec),
-ctx.EXISTS != null,
-ctx.PURGE != null)(
-command(ctx))
+ctx.EXISTS != null)
}

/**
@@ -580,10 +580,8 @@
*/
override def visitArchiveTablePartition(
ctx: ArchiveTablePartitionContext): LogicalPlan = withOrigin(ctx) {
-AlterTableArchivePartition(
-visitTableIdentifier(ctx.tableIdentifier),
-visitNonOptionalPartitionSpec(ctx.partitionSpec))(
-command(ctx))
+throw new AnalysisException(
+"Operation not allowed: ALTER TABLE ... ARCHIVE PARTITION ...")
}

/**
@@ -596,10 +594,8 @@
*/
override def visitUnarchiveTablePartition(
ctx: UnarchiveTablePartitionContext): LogicalPlan = withOrigin(ctx) {
-AlterTableUnarchivePartition(
-visitTableIdentifier(ctx.tableIdentifier),
-visitNonOptionalPartitionSpec(ctx.partitionSpec))(
-command(ctx))
+throw new AnalysisException(
+"Operation not allowed: ALTER TABLE ... UNARCHIVE PARTITION ...")
}

/**
@@ -658,10 +654,7 @@
* }}}
*/
override def visitTouchTable(ctx: TouchTableContext): LogicalPlan = withOrigin(ctx) {
-AlterTableTouch(
-visitTableIdentifier(ctx.tableIdentifier),
-Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))(
-command(ctx))
+throw new AnalysisException("Operation not allowed: ALTER TABLE ... TOUCH ...")
}

/**
@@ -673,11 +666,7 @@
* }}}
*/
override def visitCompactTable(ctx: CompactTableContext): LogicalPlan = withOrigin(ctx) {
-AlterTableCompact(
-visitTableIdentifier(ctx.tableIdentifier),
-Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
-string(ctx.STRING))(
-command(ctx))
+throw new AnalysisException("Operation not allowed: ALTER TABLE ... COMPACT ...")
}

/**
@@ -689,10 +678,7 @@
* }}}
*/
override def visitConcatenateTable(ctx: ConcatenateTableContext): LogicalPlan = withOrigin(ctx) {
-AlterTableMerge(
-visitTableIdentifier(ctx.tableIdentifier),
-Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))(
-command(ctx))
+throw new AnalysisException("Operation not allowed: ALTER TABLE ... CONCATENATE")
}

/**