14 changes: 14 additions & 0 deletions core/src/main/java/org/apache/iceberg/rest/RESTSessionCatalog.java
@@ -47,6 +47,8 @@
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableCommit;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.encryption.EncryptionUtil;
import org.apache.iceberg.encryption.KeyManagementClient;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
@@ -159,6 +161,7 @@ public class RESTSessionCatalog extends BaseViewSessionCatalog
private Integer pageSize = null;
private CloseableGroup closeables = null;
private Set<Endpoint> endpoints;
private KeyManagementClient kmsClient = null;

public RESTSessionCatalog() {
this(
@@ -256,6 +259,12 @@ public void initialize(String name, Map<String, String> unresolved) {
mergedProps,
RESTCatalogProperties.METRICS_REPORTING_ENABLED,
RESTCatalogProperties.METRICS_REPORTING_ENABLED_DEFAULT);

if (mergedProps.containsKey(CatalogProperties.ENCRYPTION_KMS_IMPL)) {
this.kmsClient = EncryptionUtil.createKmsClient(mergedProps);
this.closeables.addCloseable(this.kmsClient);
}

super.initialize(name, mergedProps);
}

@@ -455,6 +464,7 @@ public Table loadTable(SessionContext context, TableIdentifier identifier) {
paths.table(finalIdentifier),
Map::of,
tableFileIO(context, tableConf, response.credentials()),
kmsClient,
tableMetadata,
endpoints);

@@ -534,6 +544,7 @@ public Table registerTable(
paths.table(ident),
Map::of,
tableFileIO(context, tableConf, response.credentials()),
kmsClient,
response.tableMetadata(),
endpoints);

@@ -793,6 +804,7 @@ public Table create() {
paths.table(ident),
Map::of,
tableFileIO(context, tableConf, response.credentials()),
kmsClient,
response.tableMetadata(),
endpoints);

@@ -820,6 +832,7 @@ public Transaction createTransaction() {
paths.table(ident),
Map::of,
tableFileIO(context, tableConf, response.credentials()),
kmsClient,
RESTTableOperations.UpdateType.CREATE,
createChanges(meta),
meta,
@@ -883,6 +896,7 @@ public Transaction replaceTransaction() {
paths.table(ident),
Map::of,
tableFileIO(context, tableConf, response.credentials()),
kmsClient,
RESTTableOperations.UpdateType.REPLACE,
changes.build(),
base,
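For context on the `initialize` change above: the KMS client is only created when `CatalogProperties.ENCRYPTION_KMS_IMPL` is present in the merged catalog properties, and it is registered with the catalog's closeables so it is shut down with the catalog. A minimal sketch of how a user might wire this up, assuming a hypothetical `KeyManagementClient` implementation `com.example.InMemoryKmsClient` and a local REST endpoint:

```java
import java.util.Map;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.rest.RESTCatalog;

public class RESTEncryptionSetup {
  public static void main(String[] args) {
    Map<String, String> props =
        Map.of(
            // Hypothetical endpoint; point this at a real REST catalog service.
            CatalogProperties.URI, "http://localhost:8181",
            // Hypothetical KeyManagementClient implementation class; its presence is what
            // triggers RESTSessionCatalog to create and track a kmsClient.
            CatalogProperties.ENCRYPTION_KMS_IMPL, "com.example.InMemoryKmsClient");

    RESTCatalog catalog = new RESTCatalog();
    catalog.initialize("rest", props); // KMS client is created here and closed with the catalog
  }
}
```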
126 changes: 124 additions & 2 deletions core/src/main/java/org/apache/iceberg/rest/RESTTableOperations.java
@@ -24,6 +24,7 @@
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.iceberg.LocationProviders;
import org.apache.iceberg.MetadataUpdate;
import org.apache.iceberg.SnapshotRef;
@@ -32,17 +33,25 @@
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.UpdateRequirement;
import org.apache.iceberg.UpdateRequirements;
import org.apache.iceberg.encryption.EncryptedKey;
import org.apache.iceberg.encryption.EncryptingFileIO;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.encryption.EncryptionUtil;
import org.apache.iceberg.encryption.KeyManagementClient;
import org.apache.iceberg.encryption.PlaintextEncryptionManager;
import org.apache.iceberg.encryption.StandardEncryptionManager;
import org.apache.iceberg.exceptions.CommitStateUnknownException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.rest.requests.UpdateTableRequest;
import org.apache.iceberg.rest.responses.ErrorResponse;
import org.apache.iceberg.rest.responses.LoadTableResponse;
import org.apache.iceberg.util.LocationUtil;
import org.apache.iceberg.util.PropertyUtil;

class RESTTableOperations implements TableOperations {
private static final String METADATA_FOLDER_NAME = "metadata";
@@ -57,27 +66,45 @@ enum UpdateType {
private final String path;
private final Supplier<Map<String, String>> headers;
private final FileIO io;
private final KeyManagementClient kmsClient;
private final List<MetadataUpdate> createChanges;
private final TableMetadata replaceBase;
private final Set<Endpoint> endpoints;
private UpdateType updateType;
private TableMetadata current;

private EncryptionManager encryptionManager;
private EncryptingFileIO encryptingFileIO;
private String tableKeyId;
private int encryptionDekLength;
private List<EncryptedKey> encryptedKeysFromMetadata;
Contributor:

Please also factor in the #14427 changes.

Contributor Author:

Thanks, I'll need to think about whether those changes are actually needed here, given the metadata file equality check in updateCurrentMetadata (the test passes).

I suspect it's best to have such changes so that when there's new metadata, the old keys are kept around. I'll have to think about whether there's a case for this, though.


RESTTableOperations(
RESTClient client,
String path,
Supplier<Map<String, String>> headers,
FileIO io,
KeyManagementClient kmsClient,
TableMetadata current,
Set<Endpoint> endpoints) {
this(client, path, headers, io, UpdateType.SIMPLE, Lists.newArrayList(), current, endpoints);
this(
client,
path,
headers,
io,
kmsClient,
UpdateType.SIMPLE,
Lists.newArrayList(),
current,
endpoints);
}

RESTTableOperations(
RESTClient client,
String path,
Supplier<Map<String, String>> headers,
FileIO io,
KeyManagementClient kmsClient,
UpdateType updateType,
List<MetadataUpdate> createChanges,
TableMetadata current,
@@ -86,6 +113,7 @@ enum UpdateType {
this.path = path;
this.headers = headers;
this.io = io;
this.kmsClient = kmsClient;
this.updateType = updateType;
this.createChanges = createChanges;
this.replaceBase = current;
@@ -95,6 +123,10 @@ enum UpdateType {
this.current = current;
}
this.endpoints = endpoints;

// N.B. We don't use this.current because, for tables-to-be-created, it would be null,
// and we still want encryption properties in this case for its TableOperations.
encryptionPropsFromMetadata(current);
}

@Override
@@ -112,6 +144,21 @@ public TableMetadata refresh() {
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
Endpoint.check(endpoints, Endpoint.V1_UPDATE_TABLE);

if (encryption() instanceof StandardEncryptionManager) {
Contributor:

Is it possible to add this update to the metadata before we call commit? Per the API contract we should try to commit metadata:

* Replace the base table metadata with a new version.
*
* <p>This method should implement and document atomicity guarantees.
*
* <p>Implementations must check that the base metadata is current to avoid overwriting updates.
* Once the atomic commit operation succeeds, implementations must not perform any operations that
* may fail because failure in this method cannot be distinguished from commit failure.
*
* <p>Implementations must throw a {@link
* org.apache.iceberg.exceptions.CommitStateUnknownException} in cases where it cannot be
* determined if the commit succeeded or failed. For example if a network partition causes the
* confirmation of the commit to be lost, the implementation should throw a
* CommitStateUnknownException. This is important because downstream users of this API need to
* know whether they can clean up the commit or not, if the state is unknown then it is not safe
* to remove any files. All other exceptions will be treated as if the commit has failed.
*
* @param base table metadata on which changes were based
* @param metadata new table metadata with updates

Contributor (@ggershinsky, Nov 9, 2025):

A couple of points:

1. The stated requirement is to "check that the base metadata is current to avoid overwriting updates". This is met: we add the missing field to the new metadata after the check.
2. There are lots of calls to commit from various classes. Moving this outside would be technically hard (if it works at all).

// Add encryption keys to the to-be-committed metadata
TableMetadata.Builder builder = TableMetadata.buildFrom(metadata);
for (Map.Entry<String, EncryptedKey> entry :
EncryptionUtil.encryptionKeys(encryption()).entrySet()) {
builder.addEncryptionKey(entry.getValue());
Contributor Author (@smaheshwar-pltr, Oct 27, 2025):

Pointing out that this adds the required REST updates to the metadata to be committed. I think additional REST requirements for these keys are not needed.

}
commitInternal(base, builder.build());
} else {
commitInternal(base, metadata);
}
}

private void commitInternal(TableMetadata base, TableMetadata metadata) {
Consumer<ErrorResponse> errorHandler;
List<UpdateRequirement> requirements;
List<MetadataUpdate> updates;
@@ -152,6 +199,18 @@ public void commit(TableMetadata base, TableMetadata metadata) {
String.format("Update type %s is not supported", updateType));
}

if (base != null) {
Set<String> removedProps =
base.properties().keySet().stream()
.filter(key -> !metadata.properties().containsKey(key))
.collect(Collectors.toSet());

if (removedProps.contains(TableProperties.ENCRYPTION_TABLE_KEY)) {
throw new IllegalArgumentException(
"Cannot remove encryption key ID from an encrypted table");
}
}

UpdateTableRequest request = new UpdateTableRequest(requirements, updates);

// the error handler will throw necessary exceptions like CommitFailedException and
@@ -201,7 +260,44 @@ private boolean reconcileOnSimpleUpdate(

@Override
public FileIO io() {
return io;
if (tableKeyId == null) {
return io;
}

if (encryptingFileIO == null) {
encryptingFileIO = EncryptingFileIO.combine(io, encryption());
}

return encryptingFileIO;
}

@Override
public EncryptionManager encryption() {
if (encryptionManager != null) {
return encryptionManager;
}

if (tableKeyId != null) {
if (kmsClient == null) {
throw new RuntimeException(
"Cannot create encryption manager without a key management client");
}

Map<String, String> encryptionProperties =
ImmutableMap.of(
TableProperties.ENCRYPTION_TABLE_KEY,
tableKeyId,
TableProperties.ENCRYPTION_DEK_LENGTH,
String.valueOf(encryptionDekLength));

encryptionManager =
EncryptionUtil.createEncryptionManager(
encryptedKeysFromMetadata, encryptionProperties, kmsClient);
} else {
return PlaintextEncryptionManager.instance();
}

return encryptionManager;
}

private static Long expectedSnapshotIdIfSnapshotAddOnly(List<MetadataUpdate> updates) {
@@ -241,13 +337,39 @@ private static Long expectedSnapshotIdIfSnapshotAddOnly(List<MetadataUpdate> updates) {
return addedSnapshotId;
}

private void encryptionPropsFromMetadata(TableMetadata metadata) {
Contributor:

Is this method applied on the TableMetadata that is fetched directly from the REST catalog, and not from the metadata.json file? Both are possible, but the former must override (and check) the latter, to protect against key removal and other attacks.

Contributor Author:

Yes, this method will always be applied on a metadata field of a LoadTableResponse object received directly from the REST catalog (its only usage within this class is as such, and you can check the constructor usages within RESTSessionCatalog to confirm that the metadata coming in from there is as such too).

Contributor Author:

Question (not sure if there have been discussions here or if your team has thoughts): we want the key ID to come from the REST catalog service directly for security reasons.

It's typical for REST catalogs to provide metadata that corresponds to the metadata file in storage and not modify it apart from that. Given this, would it be preferable to have this field returned within the LoadTableResponse itself, to encourage catalogs to track it explicitly?

The concrete proposal here might be: ENCRYPTION_TABLE_KEY and ENCRYPTION_DEK_LENGTH become properties on the LoadTableResponse's config (mentioned in the REST spec here).

Contributor:

Well, I see two scenarios when thinking about this:

1. metadata.json is something that both the server and the clients can read (although clients wouldn't need to, given they get the metadata with the LoadTableResponse).
2. metadata.json can only be accessed on the server side, and clients are not given FS credentials (either vended or not) to reach it.

For case (1) I totally agree: we can't rely on just metadata.json to store these encryption properties, and the catalog should store them separately too, eventually populating (i.e. doing the override logic referred to by @ggershinsky) the properties in the LoadTableResponse to be created.
For case (2) I'm not 100% sure, but still leaning toward the catalog taking on this responsibility.

Either way, for the client side there's not much we can do other than recommending clients to consider the metadata from LoadTableResponse only. The rest (no pun intended) is on the server side to be decided and will be implementation-specific. For this code snippet above, irrelevant IMHO.

Let me know your thoughts.

Contributor (@ggershinsky, Nov 2, 2025):

> REST server implementations out there who cache the whole metadata.json in memory / db to avoid lookup to remote store.

Looks like these implementations are safe for table encryption.

> concern for malicious clients

In the Iceberg encryption threat model, the storage nodes are untrusted, and the compute nodes (incl. the catalog client) are trusted, so this is ok.

Contributor:

> there are REST server implementations out there who cache the whole metadata.json in memory / db

I'm curious how these implementations handle the metadata-location field, especially in the context of https://github.com/apache/iceberg/blob/main/open-api/rest-catalog-open-api.yaml#L3284? Do they work with virtual values?

Contributor:

> I'm curious how these implementations handle the metadata-location field,

Most of the implementations that I am aware of maintain an in-memory copy of what the catalog wrote to the object store, somewhat like a write-through cache, since object store reads are expensive.
But IMHO nothing stops a user from actually storing the metadata.json in a db (without persisting it in the object store) and playing along with virtual values, since most readers just read LoadTableResponse#metadata() and ignore this field as a redundant call. This becomes a particular problem when the reader wants the actual metadata.json for something like a register_table call.

My understanding is that we stored the keys info in the HiveCatalog impl in the metadata.json as part of table properties, which means it's a defined property per the Iceberg table spec irrespective of which catalog one uses, so IMHO we should ideally use the same for REST.

Contributor Author (@smaheshwar-pltr, Nov 3, 2025):

Thanks all. I agree with @singhpk234 on metadata JSONs residing in object storage, though perhaps there are larger discussions to be had here about whether that's necessary, and I suppose the suggestion of encrypting metadata JSONs might limit the portability it would bring.

> Encryption properties is a part of the problem. It is the biggest part (it can be exploited for data leaks), but another part is all the other fields in the metadata - since they can be manipulated for replay attacks and other stuff that breaks table integrity.

@ggershinsky, to aid my understanding here: for the Hive case it's only the table properties that are stored in HMS, right? Is there a follow-up there or is this sufficient for the threat model? If so, I think we're back to the REST catalog checking those table properties are correct before returning them (I feel that config is generally good for things like this, but @singhpk234 raises a good point about it being a table property).

Contributor:

> for the Hive case it's only the table properties that are stored in HMS, right?

Correct. That's why the Hive catalog provides partial security (data confidentiality only, not integrity). REST catalogs can provide full security for encrypted tables.

> Is there a follow-up there or is this sufficient for the threat model?

Afaik, HMS is not optimized for JSON storage. But maybe someone in the community will take on storing the full metadata object there, to improve table security. Having only the table properties is barely sufficient. I think we should recommend REST catalogs for encrypted tables.

> REST catalog checking those table properties are correct before returning them

To check this, the REST catalog would need to keep these properties in an independent storage / db. If so, maybe the catalog can just keep the whole metadata object there (as a json file in an independent storage, or as an object in a DB/store)?

Ok, here's my current understanding of the advantages of each approach - all corrections/additions are welcome.

Storing the encryption properties:

  • implementation: it might be easier to store two properties instead of a metadata object (in certain databases backing the catalog)

Storing the whole metadata object:

  • better security (table integrity is guaranteed)
  • implementation: it might be easier to store the whole metadata object (in object stores / nosql dbs / regular file systems backing the catalog)
  • the REST spec addition is concise, simple, and easy to understand (see the first pass at REST spec: Metadata integrity for encrypted tables #14486). An implementation is either "secure" or not. Going deep into threat-model levels and encryption property details would make the addition much bigger and more complex; it might be out of place for this spec.
    (Also, a simple security spec means a lower risk of wrong implementations / security vulnerabilities.)

if (metadata == null || metadata.properties() == null) {
return;
}

encryptedKeysFromMetadata = metadata.encryptionKeys();

Map<String, String> tableProperties = metadata.properties();
if (tableKeyId == null) {
Contributor:

Shall we always get tableKeyId in case there is a key rotation?

Contributor Author:

I suppose we can always get it from the properties here, though I suspect automatic key rotation often keeps the key ID the same, so it's perhaps reasonable to require that (otherwise I'd imagine you'd need tasks to commit new metadata with changed properties on rotation). cc @ggershinsky - curious for thoughts / whether there have been discussions on this?

Contributor:

If tableKeyId == null (here and below), it just means the table is not encrypted.

Contributor Author:

Could be wrong, but I understood @huaxingao's comment as asking whether we should re-fetch encryption-related properties from the catalog on refresh / commit.

I believe we don't need to, and that's aligned with not updating configs on refresh / commit in this class prior to this PR. The key rotation case should even be fine because it'll generally keep the ID the same.

tableKeyId = tableProperties.get(TableProperties.ENCRYPTION_TABLE_KEY);
}

if (tableKeyId != null && encryptionDekLength <= 0) {
encryptionDekLength =
PropertyUtil.propertyAsInt(
tableProperties,
TableProperties.ENCRYPTION_DEK_LENGTH,
TableProperties.ENCRYPTION_DEK_LENGTH_DEFAULT);
}

// Force re-creation of encryptingFileIO and encryptionManager
encryptingFileIO = null;
encryptionManager = null;
}

private TableMetadata updateCurrentMetadata(LoadTableResponse response) {
// LoadTableResponse is used to deserialize the response, but config is not allowed by the
// REST spec, so it can be safely ignored. There is no requirement to update config on
// refresh or commit.
if (current == null
|| !Objects.equals(current.metadataFileLocation(), response.metadataLocation())) {
this.current = checkUUID(current, response.tableMetadata());
encryptionPropsFromMetadata(this.current);
}

return current;
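Taken together, RESTTableOperations now resolves its encryption state from the table properties carried in the catalog-provided metadata: encryptionPropsFromMetadata() captures TableProperties.ENCRYPTION_TABLE_KEY and the DEK length, io() wraps the plain FileIO in an EncryptingFileIO when a key is set, and commits refuse to drop the key property. A minimal client-side sketch of how a table would opt in under these changes, assuming a catalog initialized with a KMS client as above and a hypothetical key ID "keyA" known to that KMS:

```java
import java.util.Map;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.types.Types;

public class EncryptedTableSketch {
  static Table createEncryptedTable(Catalog catalog) {
    Schema schema = new Schema(Types.NestedField.required(1, "id", Types.LongType.get()));

    // Setting TableProperties.ENCRYPTION_TABLE_KEY marks the table as encrypted;
    // RESTTableOperations picks it up in encryptionPropsFromMetadata() and builds a
    // StandardEncryptionManager (the DEK length falls back to
    // ENCRYPTION_DEK_LENGTH_DEFAULT when unset).
    return catalog
        .buildTable(TableIdentifier.of("db", "tbl"), schema)
        .withProperties(Map.of(TableProperties.ENCRYPTION_TABLE_KEY, "keyA")) // hypothetical key ID
        .create();
  }
}
```

Subsequent loads of this table through the REST catalog should then return an EncryptingFileIO from io(), and a commit whose new metadata removes TableProperties.ENCRYPTION_TABLE_KEY fails with the IllegalArgumentException added above.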
@@ -32,6 +32,7 @@
import org.apache.iceberg.encryption.UnitestKMS;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Streams;
import org.apache.iceberg.spark.CatalogTestBase;
@@ -57,6 +58,15 @@ protected static Object[][] parameters() {
SparkCatalogConfig.HIVE.catalogName(),
SparkCatalogConfig.HIVE.implementation(),
appendCatalogEncryptionProperties(SparkCatalogConfig.HIVE.properties())
},
{
SparkCatalogConfig.REST.catalogName(),
SparkCatalogConfig.REST.implementation(),
appendCatalogEncryptionProperties(
ImmutableMap.<String, String>builder()
.putAll(SparkCatalogConfig.REST.properties())
.put(CatalogProperties.URI, restCatalog.properties().get(CatalogProperties.URI))
.build())
}
};
}
@@ -44,6 +44,7 @@
import org.apache.iceberg.io.SeekableInputStream;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Streams;
import org.apache.iceberg.spark.CatalogTestBase;
@@ -69,6 +70,15 @@ protected static Object[][] parameters() {
SparkCatalogConfig.HIVE.catalogName(),
SparkCatalogConfig.HIVE.implementation(),
appendCatalogEncryptionProperties(SparkCatalogConfig.HIVE.properties())
},
{
SparkCatalogConfig.REST.catalogName(),
SparkCatalogConfig.REST.implementation(),
appendCatalogEncryptionProperties(
ImmutableMap.<String, String>builder()
.putAll(SparkCatalogConfig.REST.properties())
.put(CatalogProperties.URI, restCatalog.properties().get(CatalogProperties.URI))
.build())
}
};
}
@@ -105,8 +115,8 @@ private static List<DataFile> currentDataFiles(Table table) {

@TestTemplate
public void testRefresh() {
catalog.initialize(catalogName, catalogConfig);
Table table = catalog.loadTable(tableIdent);
validationCatalog.initialize(catalogName, catalogConfig);
Table table = validationCatalog.loadTable(tableIdent);

assertThat(currentDataFiles(table)).isNotEmpty();

@@ -118,20 +128,23 @@ public void testRefresh() {

@TestTemplate
public void testTransaction() {
catalog.initialize(catalogName, catalogConfig);

Table table = catalog.loadTable(tableIdent);
validationCatalog.initialize(catalogName, catalogConfig);
Table table = validationCatalog.loadTable(tableIdent);

List<DataFile> dataFiles = currentDataFiles(table);
Transaction transaction = table.newTransaction();
AppendFiles append = transaction.newAppend();

// add an arbitrary datafile
append.appendFile(dataFiles.get(0));

// append to the table in the meantime. use a separate load to avoid shared operations
validationCatalog.loadTable(tableIdent).newFastAppend().appendFile(dataFiles.get(0)).commit();
Contributor:

@szlta, could you have a quick look at this addition?

Contributor:

Sure, this does look good to me.


append.commit();
transaction.commitTransaction();

assertThat(currentDataFiles(table).size()).isEqualTo(dataFiles.size() + 1);
assertThat(currentDataFiles(table).size()).isEqualTo(dataFiles.size() + 2);
}

@TestTemplate