# Implement PURGE to remove DVs from Delta tables #1732
```diff
@@ -346,7 +346,28 @@ class DeltaSqlAstBuilder extends DeltaSqlBaseBaseVisitor[AnyRef] {
     OptimizeTableCommand(
       Option(ctx.path).map(string),
       Option(ctx.table).map(visitTableIdentifier),
-      Option(ctx.partitionPredicate).map(extractRawText(_)).toSeq, Map.empty)(interleaveBy)
+      Option(ctx.partitionPredicate).map(extractRawText(_)).toSeq,
+      Map.empty)(interleaveBy)
   }
 
+  /**
+   * Creates a [[ReorgTable]] logical plan.
+   * Examples:
+   * {{{
+   *   -- Physically delete dropped columns of target table
+   *   REORG TABLE (delta.`/path/to/table` | delta_table_name)
+   *     [WHERE partition_predicate] APPLY (PURGE)
+   * }}}
+   */
+  override def visitReorgTable(ctx: ReorgTableContext): AnyRef = withOrigin(ctx) {
+    if (ctx.table == null) {
+      throw new ParseException("REORG command requires a file path or table name.", ctx)
+    }
+
+    val targetIdentifier = visitMultipartIdentifier(ctx.table)
+    val targetTable = createUnresolvedTable(targetIdentifier, "REORG")
+
+    ReorgTable(targetTable)(Option(ctx.partitionPredicate).map(extractRawText(_)).toSeq)
+  }
+
   override def visitDescribeDeltaDetail(
```

Review discussion on `visitMultipartIdentifier(ctx.table)`:

> **Review comment:** why not use the …
>
> **Reply:** It's mainly because …
>
> **Reply:** We use …
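For context, here is how the new statement would presumably be issued once this rule is wired up. This is an illustrative sketch only, assuming a `SparkSession` with the Delta SQL extension enabled; the table path and name are made up:

```scala
// Illustrative usage of the new syntax; `/tmp/delta/events` and
// `my_delta_table` are hypothetical names, not part of this PR.
spark.sql("REORG TABLE delta.`/tmp/delta/events` APPLY (PURGE)")

// Optionally restrict the rewrite with a partition predicate:
spark.sql("REORG TABLE my_delta_table WHERE part = 'a' APPLY (PURGE)")
```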
```diff
@@ -109,7 +109,8 @@ case class OptimizeTableCommand(
     path: Option[String],
     tableId: Option[TableIdentifier],
     userPartitionPredicates: Seq[String],
-    options: Map[String, String])(val zOrderBy: Seq[UnresolvedAttribute])
+    options: Map[String, String],
+    isPurge: Boolean = false)(val zOrderBy: Seq[UnresolvedAttribute])
   extends OptimizeTableCommandBase with LeafRunnableCommand {
 
   override val otherCopyArgs: Seq[AnyRef] = zOrderBy :: Nil
```
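Note why `isPurge` goes into the first parameter list while `zOrderBy` stays curried: Scala generates `equals`, `copy`, and `productIterator` only from a case class's first parameter list, so Catalyst's tree-copying machinery would silently drop curried fields unless they are re-supplied via `otherCopyArgs`. A standalone sketch of that language behavior (not Delta code):

```scala
// Minimal demonstration: the second parameter list is invisible to the
// generated case-class machinery, which is why OptimizeTableCommand must
// re-supply zOrderBy through otherCopyArgs.
case class Demo(a: Int)(val b: String)

object CurriedDemo extends App {
  val d = Demo(1)("x")
  assert(d.productArity == 1) // only `a` participates in the product
  assert(d == Demo(1)("y"))   // equality ignores the second parameter list
  println("second parameter list is excluded from product and equality")
}
```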
```diff
@@ -138,7 +139,8 @@ case class OptimizeTableCommand(
     validateZorderByColumns(sparkSession, txn, zOrderBy)
     val zOrderByColumns = zOrderBy.map(_.name).toSeq
 
-    new OptimizeExecutor(sparkSession, txn, partitionPredicates, zOrderByColumns).optimize()
+    new OptimizeExecutor(sparkSession, txn, partitionPredicates, zOrderByColumns, isPurge)
+      .optimize()
   }
 }
```
```diff
@@ -154,7 +156,8 @@ class OptimizeExecutor(
     sparkSession: SparkSession,
     txn: OptimisticTransaction,
     partitionPredicate: Seq[Expression],
-    zOrderByColumns: Seq[String])
+    zOrderByColumns: Seq[String],
+    isPurge: Boolean = false)
   extends DeltaCommand with SQLMetricsReporting with Serializable {
 
   /** Timestamp to use in [[FileAction]] */
```
```diff
@@ -164,18 +167,22 @@ class OptimizeExecutor(
 
   def optimize(): Seq[Row] = {
     recordDeltaOperation(txn.deltaLog, "delta.optimize") {
-      val minFileSize = sparkSession.sessionState.conf.getConf(
-        DeltaSQLConf.DELTA_OPTIMIZE_MIN_FILE_SIZE)
-      val maxFileSize = sparkSession.sessionState.conf.getConf(
-        DeltaSQLConf.DELTA_OPTIMIZE_MAX_FILE_SIZE)
-      require(minFileSize > 0, "minFileSize must be > 0")
-      require(maxFileSize > 0, "maxFileSize must be > 0")
+      val (minFileSize, maxDeletedRowsRatio) = if (isPurge) {
+        (0L, 0d) // Only selects files with DV
+      } else {
+        val minFileSize = sparkSession.sessionState.conf.getConf(
+          DeltaSQLConf.DELTA_OPTIMIZE_MIN_FILE_SIZE)
+        val maxDeletedRowsRatio = sparkSession.sessionState.conf.getConf(
+          DeltaSQLConf.DELTA_OPTIMIZE_MAX_DELETED_ROWS_RATIO)
+        require(minFileSize > 0, "minFileSize must be > 0")
+        (minFileSize, maxDeletedRowsRatio)
+      }
 
       val candidateFiles = txn.filterFiles(partitionPredicate, keepNumRecords = true)
       val partitionSchema = txn.metadata.partitionSchema
 
-      val maxDeletedRowsRatio = sparkSession.sessionState.conf.getConf(
-        DeltaSQLConf.DELTA_OPTIMIZE_MAX_DELETED_ROWS_RATIO)
       val filesToProcess = pruneCandidateFileList(minFileSize, maxDeletedRowsRatio, candidateFiles)
       val partitionsToCompact = filesToProcess.groupBy(_.partitionValues).toSeq
```

Review discussion on the new selection logic:

> **Review comment:** Also, does this select files that don't have a DV, so they get rewritten? Do we want that?
>
> **Reply:** It didn't select files without DVs, because with …
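To make the effect of `(0L, 0d)` concrete, here is a rough sketch of the selection predicate those two thresholds feed into. The real logic lives in `pruneCandidateFileList`, which is not shown in this diff; the type and field names below are simplified assumptions:

```scala
// Simplified sketch, not the actual pruneCandidateFileList implementation.
case class CandidateFile(size: Long, numRecords: Long, numDeletedRows: Long, hasDV: Boolean)

def selectForRewrite(
    f: CandidateFile, minFileSize: Long, maxDeletedRowsRatio: Double): Boolean = {
  val deletedRatio =
    if (f.numRecords == 0) 0.0 else f.numDeletedRows.toDouble / f.numRecords
  // Normal OPTIMIZE picks small files, or DV'd files whose deleted-rows ratio
  // exceeds the threshold. With minFileSize = 0 and maxDeletedRowsRatio = 0
  // (the PURGE case), the first clause never fires, so only files carrying a
  // DV with at least one deleted row qualify.
  f.size < minFileSize || (f.hasDV && deletedRatio > maxDeletedRowsRatio)
}
```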
New file (`@@ -0,0 +1,48 @@`):

```scala
/*
 * Copyright (2021) The Delta Lake Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.delta.commands

import org.apache.spark.sql.delta.catalog.DeltaTableV2
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.plans.logical.{IgnoreCachedData, LeafCommand, LogicalPlan, UnaryCommand}
import org.apache.spark.sql.catalyst.TableIdentifier

case class ReorgTable(target: LogicalPlan)(val predicates: Seq[String]) extends UnaryCommand {

  def child: LogicalPlan = target

  protected def withNewChildInternal(newChild: LogicalPlan): LogicalPlan =
    copy(target = newChild)(predicates)

  override val otherCopyArgs: Seq[AnyRef] = predicates :: Nil
}

case class ReorgTableCommand(target: DeltaTableV2)(val predicates: Seq[String])
  extends OptimizeTableCommandBase with LeafCommand with IgnoreCachedData {

  override val otherCopyArgs: Seq[AnyRef] = predicates :: Nil

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val command = OptimizeTableCommand(
      Option(target.path.toString),
      target.catalogTable.map(_.identifier),
      predicates,
      options = Map.empty,
      isPurge = true)(zOrderBy = Nil)
    command.run(sparkSession)
  }
}
```

Review discussion on `command.run(sparkSession)`:

> **Review comment:** Is the command output expected to be the same as OPTIMIZE's? Are the columns the same?
>
> **Reply:** Yes, they're the same. The return schema contains two columns, …
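The `ReorgTable` node above is only a logical plan, so something must rewrite it into `ReorgTableCommand` once the target has been resolved. That resolution step is not part of this diff; presumably it is an analyzer rule along these lines (the rule name and placement are assumptions, and the real rule would live in Delta's analyzer extensions, e.g. `DeltaAnalysis`):

```scala
// Hypothetical sketch of the resolution step; not shown in this PR.
import org.apache.spark.sql.catalyst.analysis.ResolvedTable
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.delta.catalog.DeltaTableV2
import org.apache.spark.sql.delta.commands.{ReorgTable, ReorgTableCommand}

object ResolveReorgTable extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // Once the child resolves to a Delta table, swap in the runnable command,
    // carrying the raw predicate strings across via the curried argument.
    case reorg @ ReorgTable(ResolvedTable(_, _, table: DeltaTableV2, _)) =>
      ReorgTableCommand(table)(reorg.predicates)
  }
}
```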
New file (`@@ -0,0 +1,101 @@`):

```scala
/*
 * Copyright (2021) The Delta Lake Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.delta

import org.apache.spark.sql.delta.test.DeltaSQLCommandTest
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession

class DeltaPurgeSuite extends QueryTest
  with SharedSparkSession
  with DeltaSQLCommandTest
  with DeletionVectorsTestUtils {

  import testImplicits._

  def executePurge(table: String, condition: Option[String] = None): Unit = {
    condition match {
      case Some(cond) => sql(s"REORG TABLE delta.`$table` WHERE $cond APPLY (PURGE)")
      case None => sql(s"REORG TABLE delta.`$table` APPLY (PURGE)")
    }
  }

  testWithDVs("Purge DVs will combine small files") {
    withTempDir { tempDir =>
      val path = tempDir.getCanonicalPath
      val log = DeltaLog.forTable(spark, path)
      spark
        .range(0, 100, 1, numPartitions = 5)
        .write
        .format("delta")
        .save(path)
      sql(s"DELETE FROM delta.`$path` WHERE id IN (0, 99)")
      assert(log.update().allFiles.filter(_.deletionVector != null).count() === 2)
      executePurge(path)
      val (addFiles, _) = getFileActionsInLastVersion(log)
      assert(addFiles.forall(_.deletionVector === null))
      checkAnswer(
        sql(s"SELECT * FROM delta.`$path`"),
        (1 to 98).toDF())
    }
  }

  testWithDVs("Purge DVs") {
    withTempDir { tempDir =>
      val path = tempDir.getCanonicalPath
      val log = DeltaLog.forTable(spark, path)
      spark
        .range(0, 100, 1, numPartitions = 5)
        .write
        .format("delta")
        .save(path)
      sql(s"DELETE FROM delta.`$path` WHERE id IN (0, 99)")
      assert(log.update().allFiles.filter(_.deletionVector != null).count() === 2)

      // First purge
      executePurge(path)
      val (addFiles, _) = getFileActionsInLastVersion(log)
      assert(addFiles.size === 1) // two files are combined
      assert(addFiles.forall(_.deletionVector === null))
      checkAnswer(
        sql(s"SELECT * FROM delta.`$path`"),
        (1 to 98).toDF())

      // Second purge is a noop
      val versionBefore = log.update().version
      executePurge(path)
      val versionAfter = log.update().version
      assert(versionBefore === versionAfter)
    }
  }

  test("Purge a non-DV table is a noop") {
    withTempDir { tempDir =>
      val path = tempDir.getCanonicalPath
      val log = DeltaLog.forTable(spark, path)
      spark
        .range(0, 100, 1, numPartitions = 5)
        .write
        .format("delta")
        .save(path)
      val versionBefore = log.update().version
      executePurge(path)
      val versionAfter = log.update().version
      assert(versionBefore === versionAfter)
    }
  }
}
```

Review discussion on the "Purge a non-DV table is a noop" test:

> **Review comment:** Does this compact the files?
>
> **Reply:** No. The final …
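The helper `getFileActionsInLastVersion` comes from `DeletionVectorsTestUtils` and is not shown in this diff. Its assumed shape is something like the following sketch: read the actions committed at the latest log version and split them into adds and removes.

```scala
// Rough sketch of the test helper's assumed behavior, not the real source.
import org.apache.spark.sql.delta.DeltaLog
import org.apache.spark.sql.delta.actions.{AddFile, RemoveFile}

def getFileActionsInLastVersion(log: DeltaLog): (Seq[AddFile], Seq[RemoveFile]) = {
  // DeltaLog.getChanges(startVersion) yields (version, actions) pairs; take
  // the actions of the latest version only.
  val (_, actions) = log.getChanges(log.update().version).next()
  (actions.collect { case a: AddFile => a },
    actions.collect { case r: RemoveFile => r })
}
```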
Review discussion:

> **Review comment:** Is this change intended?
>
> **Reply:** Yes, because I see other rules do not have spaces around `=`.