incremental query file and partition pruning #2 #6

Open. Wants to merge 1 commit into base: release-0.14.0.
HoodieCommitMetadata.java (org.apache.hudi.common.model):

@@ -18,6 +18,7 @@

 package org.apache.hudi.common.model;

+import org.apache.hadoop.fs.GlobPattern;
 import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hudi.common.util.JsonUtils;
 import org.apache.hudi.common.util.Option;

@@ -174,18 +175,21 @@ public Map<HoodieFileGroupId, String> getFileGroupIdAndFullPaths(String basePath
    * @param basePath The base path
    * @return the file full path to file status mapping
    */
-  public Map<String, FileStatus> getFullPathToFileStatus(Configuration hadoopConf, String basePath) {
+  public Map<String, FileStatus> getFullPathToFileStatus(Configuration hadoopConf, String basePath, String fileNamePattern) {
     Map<String, FileStatus> fullPathToFileStatus = new HashMap<>();
+    GlobPattern globMatcher = new GlobPattern(fileNamePattern);
     for (List<HoodieWriteStat> stats : getPartitionToWriteStats().values()) {
       // Iterate through all the written files.
       for (HoodieWriteStat stat : stats) {
         String relativeFilePath = stat.getPath();
-        Path fullPath = relativeFilePath != null ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null;
-        if (fullPath != null) {
-          long blockSize = FSUtils.getFs(fullPath.toString(), hadoopConf).getDefaultBlockSize(fullPath);
-          FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, blockSize,
-              0, fullPath);
-          fullPathToFileStatus.put(fullPath.getName(), fileStatus);
+        if (fileNamePattern.isEmpty() || globMatcher.matches(relativeFilePath)) {
+          Path fullPath = relativeFilePath != null ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null;
+          if (fullPath != null) {
+            long blockSize = FSUtils.getFs(fullPath.toString(), hadoopConf).getDefaultBlockSize(fullPath);
+            FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, blockSize,
+                0, fullPath);
+            fullPathToFileStatus.put(fullPath.getName(), fileStatus);
+          }
         }
       }
     }
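For reference, the GlobPattern used above is Hadoop's org.apache.hadoop.fs.GlobPattern, which compiles a glob into a regex where "*" becomes ".*" (so it also crosses "/" boundaries) and matches() requires a full-string match. That is why the fileNamePattern.isEmpty() short-circuit matters: an empty glob matches only the empty string, so without the guard the default value would filter out every file. A minimal sketch of these semantics, with invented relative paths:

import org.apache.hadoop.fs.GlobPattern

object GlobSemanticsSketch extends App {
  // An empty glob matches only "", hence the fileNamePattern.isEmpty() guard above.
  val empty = new GlobPattern("")
  println(empty.matches("2021/08/any-file.parquet")) // false

  // "*" compiles to ".*" and crosses "/", so a pattern can anchor on any
  // substring of the relative path stored in HoodieWriteStat.getPath.
  val monthly = new GlobPattern("*2021/08/*.parquet")
  println(monthly.matches("2021/08/f1_0-1-1_20210801000000.parquet")) // true
  println(monthly.matches("2021/09/f2_0-1-1_20210901000000.parquet")) // false
}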
@@ -145,7 +145,7 @@ private static Map<String, FileStatus> getFilesToRead(
       case COPY_ON_WRITE:
         return metadata.getFileIdToFileStatus(hadoopConf, basePath);
       case MERGE_ON_READ:
-        return metadata.getFullPathToFileStatus(hadoopConf, basePath);
+        return metadata.getFullPathToFileStatus(hadoopConf, basePath, "");
       default:
         throw new AssertionError();
     }
@@ -171,7 +171,7 @@ protected List<FileStatus> listStatusForIncrementalMode(JobConf job,

     // build fileGroup from fsView
     List<FileStatus> affectedFileStatus = Arrays.asList(HoodieInputFormatUtils
-        .listAffectedFilesForCommits(job, new Path(tableMetaClient.getBasePath()), metadataList));
+        .listAffectedFilesForCommits(job, new Path(tableMetaClient.getBasePath()), metadataList, ""));
     // step3
     HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(tableMetaClient, commitsTimelineToReturn, affectedFileStatus.toArray(new FileStatus[0]));
     // build fileGroup from fsView
HoodieInputFormatUtils.java:

@@ -505,12 +505,12 @@ private static HoodieBaseFile refreshFileStatus(Configuration conf, HoodieBaseFi
    * @param metadataList The metadata list to read the data from
    * @return the affected file status array
    */
-  public static FileStatus[] listAffectedFilesForCommits(Configuration hadoopConf, Path basePath, List<HoodieCommitMetadata> metadataList) {
+  public static FileStatus[] listAffectedFilesForCommits(Configuration hadoopConf, Path basePath, List<HoodieCommitMetadata> metadataList, String fileNamePattern) {
     // TODO: Use HoodieMetaTable to extract affected file directly.
     HashMap<String, FileStatus> fullPathToFileStatus = new HashMap<>();
     // Iterate through the given commits.
     for (HoodieCommitMetadata metadata : metadataList) {
-      fullPathToFileStatus.putAll(metadata.getFullPathToFileStatus(hadoopConf, basePath.toString()));
+      fullPathToFileStatus.putAll(metadata.getFullPathToFileStatus(hadoopConf, basePath.toString(), fileNamePattern));
     }
     return fullPathToFileStatus.values().toArray(new FileStatus[0]);
   }
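Since statuses from all commits in the range land in one HashMap keyed by file name, a file rewritten by several commits contributes only one FileStatus, the last one iterated. A toy stand-in for that dedup, with invented file names and sizes in place of FileStatus:

import scala.collection.mutable

object AffectedFilesDedupSketch extends App {
  // Toy stand-in (name -> file size) for the HashMap in
  // listAffectedFilesForCommits: later commits overwrite earlier
  // entries for the same file name, mirroring putAll.
  val fullPathToFileStatus = mutable.HashMap.empty[String, Long]
  val commitStats = Seq(
    Seq("f1.parquet" -> 100L, "f2.parquet" -> 200L), // commit 1
    Seq("f1.parquet" -> 150L)                        // commit 2 rewrites f1
  )
  commitStats.foreach(stats => fullPathToFileStatus ++= stats)
  println(fullPathToFileStatus) // e.g. f1.parquet -> 150, f2.parquet -> 200
}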
DataSourceReadOptions.scala:

@@ -161,6 +161,12 @@ object DataSourceReadOptions {
     .withDocumentation("For the use-cases like users only want to incremental pull from certain partitions "
       + "instead of the full table. This option allows using glob pattern to directly filter on path.")

+  val INCR_PARTITION_GLOB: ConfigProperty[String] = ConfigProperty
+    .key("hoodie.datasource.read.incr.partition.glob")
+    .defaultValue("")
+    .withDocumentation("For use-cases where users want to incrementally pull only certain partitions "
+      + "instead of the full table. This option allows using a glob pattern to directly filter on partition.")
+
   val TIME_TRAVEL_AS_OF_INSTANT: ConfigProperty[String] = HoodieCommonConfig.TIMESTAMP_AS_OF

   val ENABLE_DATA_SKIPPING: ConfigProperty[Boolean] = ConfigProperty
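To show how the new option is meant to compose with the existing INCR_PATH_GLOB, here is a hypothetical incremental read. It assumes an existing SparkSession named spark with the Hudi bundle on the classpath; the table path, begin instant, and glob values are made up for illustration:

// Hypothetical incremental read using both glob options.
val df = spark.read.format("hudi")
  .option("hoodie.datasource.query.type", "incremental")
  .option("hoodie.datasource.read.begin.instanttime", "20230801000000")
  // existing option: glob applied to the relative file path
  .option("hoodie.datasource.read.incr.path.glob", "*.parquet")
  // option added by this PR: glob applied to the relative partition path
  .option("hoodie.datasource.read.incr.partition.glob", "2023/08/*")
  .load("/tmp/hudi_trips_mor")
df.show()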
MergeOnReadIncrementalRelation.scala:

@@ -103,13 +103,14 @@ case class MergeOnReadIncrementalRelation(override val sqlContext: SQLContext,
       val fsView = new HoodieTableFileSystemView(metaClient, timeline, affectedFilesInCommits)

       val modifiedPartitions = getWritePartitionPaths(commitsMetadata)
+      val globMatcher = new GlobPattern("*" + globPartitionPattern)

-      modifiedPartitions.asScala.flatMap { relativePartitionPath =>
+      modifiedPartitions.asScala.filter(p => globMatcher.matches(p)).flatMap { relativePartitionPath =>
         fsView.getLatestMergedFileSlicesBeforeOrOn(relativePartitionPath, latestCommit).iterator().asScala
       }.toSeq
     }

-    buildSplits(filterFileSlices(fileSlices, globPattern))
+    buildSplits(fileSlices)
   }
 }

@@ -175,7 +176,7 @@ trait HoodieIncrementalRelationTrait extends HoodieBaseRelation {
   protected lazy val commitsMetadata = includedCommits.map(getCommitMetadata(_, super.timeline)).asJava

   protected lazy val affectedFilesInCommits: Array[FileStatus] = {
-    listAffectedFilesForCommits(conf, new Path(metaClient.getBasePath), commitsMetadata)
+    listAffectedFilesForCommits(conf, new Path(metaClient.getBasePath), commitsMetadata, "*" + globPathPattern)
   }

   protected lazy val (includeStartTime, startTs) = if (startInstantArchived) {
@@ -227,8 +228,10 @@
     }
   }

-  protected def globPattern: String =
+  protected def globPathPattern: String =
     optParams.getOrElse(DataSourceReadOptions.INCR_PATH_GLOB.key, DataSourceReadOptions.INCR_PATH_GLOB.defaultValue)

+  protected def globPartitionPattern: String =
+    optParams.getOrElse(DataSourceReadOptions.INCR_PARTITION_GLOB.key, DataSourceReadOptions.INCR_PARTITION_GLOB.defaultValue)
 }
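One detail worth calling out in the two call sites above is the "*" + pattern prefixing: the strings being matched are relative paths (partition paths such as 2021/08/01, or file paths from HoodieWriteStat), so the leading "*" effectively turns the user's glob into a suffix match, and the default empty option degenerates to a match-all "*". A short sketch under those assumptions, with made-up partition paths:

import org.apache.hadoop.fs.GlobPattern

object PartitionGlobSketch extends App {
  // Default: the option is "", so the effective pattern is "*", which
  // matches every partition path and leaves behavior unchanged.
  val matchAll = new GlobPattern("*" + "")
  println(matchAll.matches("2021/08/01")) // true

  // With hoodie.datasource.read.incr.partition.glob = "08/*", the leading
  // "*" lets the glob match against the tail of the relative partition path.
  val augustOnly = new GlobPattern("*" + "08/*")
  println(augustOnly.matches("2021/08/01")) // true
  println(augustOnly.matches("2021/09/15")) // false
}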