4 changes: 2 additions & 2 deletions docs/layouts/shortcodes/generated/core_configuration.html
@@ -35,7 +35,7 @@
<tr>
<td><h5>changelog-producer.row-deduplicate</h5></td>
<td style="word-wrap: break-word;">false</td>
<td><p>Boolean</p></td>
<td>Boolean</td>
<td>Whether to generate -U, +U changelog for the same record. This configuration is only valid for the changelog-producer is lookup or full-compaction.</td>
</tr>
<tr>
@@ -300,7 +300,7 @@
<td><h5>scan.manifest.parallelism</h5></td>
<td style="word-wrap: break-word;">(none)</td>
<td>Integer</td>
<td>The parallelism of scanning manifest files, default value is the size of cpu processor.Note: Scale-up this parameter will increase memory usage while scanning manifest files.We can consider downsize it when we encounter an out of memory exception while scanning</td>
<td>The parallelism of scanning manifest files, default value is the size of cpu processor. Note: Scale-up this parameter will increase memory usage while scanning manifest files. We can consider downsize it when we encounter an out of memory exception while scanning</td>
</tr>
<tr>
<td><h5>scan.mode</h5></td>
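For context on how `scan.manifest.parallelism` is typically supplied, here is a minimal, hedged sketch in plain Java. It only builds the documented string key/value pair and deliberately calls no Paimon API, since the actual entry point (a SQL `WITH` clause, a catalog `createTable` call, etc.) depends on the engine in use:

```java
import java.util.HashMap;
import java.util.Map;

public class ScanManifestParallelismExample {
    public static void main(String[] args) {
        // Paimon table options are plain string key/value pairs; the key below is
        // the one documented above. Setting "4" caps manifest scanning at 4 threads,
        // which can reduce memory pressure compared with the CPU-core default.
        Map<String, String> tableOptions = new HashMap<>();
        tableOptions.put("scan.manifest.parallelism", "4");

        // Pass tableOptions wherever the engine accepts Paimon table options
        // (SQL WITH clause, catalog call, etc. -- not shown here).
        System.out.println(tableOptions);
    }
}
```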
4 changes: 2 additions & 2 deletions paimon-core/src/main/java/org/apache/paimon/CoreOptions.java
@@ -399,8 +399,8 @@ public class CoreOptions implements Serializable {
.intType()
.noDefaultValue()
.withDescription(
"The parallelism of scanning manifest files, default value is the size of cpu processor."
+ "Note: Scale-up this parameter will increase memory usage while scanning manifest files."
"The parallelism of scanning manifest files, default value is the size of cpu processor. "
+ "Note: Scale-up this parameter will increase memory usage while scanning manifest files. "
+ "We can consider downsize it when we encounter an out of memory exception while scanning");

public static final ConfigOption<LogConsistency> LOG_CONSISTENCY =
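Since the option has no default value, the documented behaviour is a fallback to the number of CPU cores when it is unset. A small sketch of that resolution logic, as an illustrative stand-in rather than Paimon's actual code path:

```java
import java.util.Map;

public class ManifestScanParallelism {
    /**
     * Resolves the effective parallelism the way the description above documents it:
     * use "scan.manifest.parallelism" when configured, otherwise fall back to the
     * number of available CPU cores. Illustrative only, not the Paimon implementation.
     */
    static int resolve(Map<String, String> options) {
        String configured = options.get("scan.manifest.parallelism");
        return configured != null
                ? Integer.parseInt(configured)
                : Runtime.getRuntime().availableProcessors();
    }
}
```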

This file was deleted.

@@ -24,30 +24,61 @@
import org.apache.paimon.types.IntType;
import org.apache.paimon.types.RowType;
import org.apache.paimon.types.TinyIntType;
import org.apache.paimon.utils.FileStorePathFactory;
import org.apache.paimon.utils.Preconditions;

import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.apache.paimon.utils.SerializationUtils.newBytesType;

/** Entry of a manifest file, representing an addition / deletion of a data file. */
public class ManifestEntry extends AbstractManifestEntry {
public class ManifestEntry {

private final FileKind kind;
// for tables without partition this field should be a row with 0 columns (not null)
private final BinaryRow partition;
private final int bucket;
private final int totalBuckets;
private final DataFileMeta file;

public ManifestEntry(
FileKind kind, BinaryRow partition, int bucket, int totalBuckets, DataFileMeta file) {
super(kind, file.fileName(), partition, bucket, totalBuckets, file.level());
this.kind = kind;
this.partition = partition;
this.bucket = bucket;
this.totalBuckets = totalBuckets;
this.file = file;
}

public FileKind kind() {
return kind;
}

public BinaryRow partition() {
return partition;
}

public int bucket() {
return bucket;
}

public int totalBuckets() {
return totalBuckets;
}

public DataFileMeta file() {
return file;
}

public Identifier identifier() {
return new Identifier(partition, bucket, file.level(), file.fileName());
}

public static RowType schema() {
List<DataField> fields = new ArrayList<>();
fields.add(new DataField(0, "_KIND", new TinyIntType(false)));
@@ -81,6 +112,43 @@ public String toString() {
return String.format("{%s, %s, %d, %d, %s}", kind, partition, bucket, totalBuckets, file);
}

public static Collection<ManifestEntry> mergeEntries(Iterable<ManifestEntry> entries) {
LinkedHashMap<Identifier, ManifestEntry> map = new LinkedHashMap<>();
mergeEntries(entries, map);
return map.values();
}

public static void mergeEntries(
Iterable<ManifestEntry> entries, Map<Identifier, ManifestEntry> map) {
for (ManifestEntry entry : entries) {
ManifestEntry.Identifier identifier = entry.identifier();
switch (entry.kind()) {
case ADD:
Preconditions.checkState(
!map.containsKey(identifier),
"Trying to add file %s which is already added. Manifest might be corrupted.",
identifier);
map.put(identifier, entry);
break;
case DELETE:
// each dataFile will only be added once and deleted once,
// if we know that it is added before then both add and delete entry can be
// removed because there won't be further operations on this file,
// otherwise we have to keep the delete entry because the add entry must be
// in the previous manifest files
if (map.containsKey(identifier)) {
map.remove(identifier);
} else {
map.put(identifier, entry);
}
break;
default:
throw new UnsupportedOperationException(
"Unknown value kind " + entry.kind().name());
}
}
}

public static void assertNoDelete(Collection<ManifestEntry> entries) {
for (ManifestEntry entry : entries) {
Preconditions.checkState(
@@ -89,4 +157,54 @@ public static void assertNoDelete(Collection<ManifestEntry> entries) {
entry.file().fileName());
}
}

/**
* The same {@link Identifier} indicates that the {@link ManifestEntry} refers to the same data
* file.
*/
public static class Identifier {
public final BinaryRow partition;
public final int bucket;
public final int level;
public final String fileName;

private Identifier(BinaryRow partition, int bucket, int level, String fileName) {
this.partition = partition;
this.bucket = bucket;
this.level = level;
this.fileName = fileName;
}

@Override
public boolean equals(Object o) {
if (!(o instanceof Identifier)) {
return false;
}
Identifier that = (Identifier) o;
return Objects.equals(partition, that.partition)
&& bucket == that.bucket
&& level == that.level
&& Objects.equals(fileName, that.fileName);
}

@Override
public int hashCode() {
return Objects.hash(partition, bucket, level, fileName);
}

@Override
public String toString() {
return String.format("{%s, %d, %d, %s}", partition, bucket, level, fileName);
}

public String toString(FileStorePathFactory pathFactory) {
return pathFactory.getPartitionString(partition)
+ ", bucket "
+ bucket
+ ", level "
+ level
+ ", file "
+ fileName;
}
}
}
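To make the ADD/DELETE merge rules in `mergeEntries` easier to trace, here is a self-contained sketch using simplified stand-in types: a plain file name takes the place of `ManifestEntry.Identifier`, and no `DataFileMeta` is constructed. It illustrates the rules described in the comments above, not a call into the Paimon classes:

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

/** Simplified stand-in types (not the Paimon classes) used to trace the merge rules above. */
public class MergeEntriesSketch {

    enum Kind { ADD, DELETE }

    static class Entry {
        final Kind kind;
        final String fileName; // stands in for ManifestEntry.Identifier

        Entry(Kind kind, String fileName) {
            this.kind = kind;
            this.fileName = fileName;
        }
    }

    static Map<String, Entry> merge(Iterable<Entry> entries) {
        Map<String, Entry> map = new LinkedHashMap<>();
        for (Entry entry : entries) {
            if (entry.kind == Kind.ADD) {
                // an ADD for a file already in the map would mean a corrupted manifest
                if (map.containsKey(entry.fileName)) {
                    throw new IllegalStateException("File " + entry.fileName + " added twice");
                }
                map.put(entry.fileName, entry);
            } else {
                if (map.containsKey(entry.fileName)) {
                    // ADD and DELETE seen in the same batch: no further operations on
                    // this file are possible, so both entries can be dropped
                    map.remove(entry.fileName);
                } else {
                    // keep the DELETE; the matching ADD sits in an earlier manifest file
                    map.put(entry.fileName, entry);
                }
            }
        }
        return map;
    }

    public static void main(String[] args) {
        Map<String, Entry> merged = merge(Arrays.asList(
                new Entry(Kind.ADD, "data-1"),
                new Entry(Kind.DELETE, "data-1"),   // cancels the ADD above
                new Entry(Kind.DELETE, "data-0"))); // survives: its ADD is in an older manifest
        System.out.println(merged.keySet()); // prints [data-0]
    }
}
```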