77 changes: 76 additions & 1 deletion regtests/t_pyspark/src/test_spark_sql_s3_with_privileges.py
@@ -520,7 +520,7 @@ def test_spark_credentials_can_delete_after_purge(root_client, snowflake_catalog
attempts = 0

# watch the data directory. metadata will be deleted first, so if data directory is clear, we can expect
# metadatat diretory to be clear also
# metadata directory to be clear also
while 'Contents' in objects and len(objects['Contents']) > 0 and attempts < 60:
time.sleep(1) # seconds, not milliseconds ;)
objects = s3.list_objects(Bucket=test_bucket, Delimiter='/',
@@ -1149,6 +1149,81 @@ def test_spark_ctas(snowflake_catalog, polaris_catalog_url, snowman):
spark.sql(f"drop table {table_name}_t2 PURGE")


@pytest.mark.skipif(os.environ.get('AWS_TEST_ENABLED', 'False').lower() != 'true',
reason='AWS_TEST_ENABLED is not set or is false')
def test_spark_credentials_s3_exception_on_metadata_file_deletion(root_client, snowflake_catalog, polaris_catalog_url,
snowman, snowman_catalog_client, test_bucket, aws_bucket_base_location_prefix):
"""
Create a using Spark. Then call the loadTable api directly with snowman token to fetch the vended credentials
for the first table.
Delete the metadata directory and try to access the table using the vended credentials.
It should throw 404 exception
:param root_client:
:param snowflake_catalog:
:param polaris_catalog_url:
:param snowman_catalog_client:
:param reader_catalog_client:
:return:
"""
with IcebergSparkSession(credentials=f'{snowman.principal.client_id}:{snowman.credentials.client_secret}',
catalog_name=snowflake_catalog.name,
polaris_url=polaris_catalog_url) as spark:
spark.sql(f'USE {snowflake_catalog.name}')
spark.sql('CREATE NAMESPACE db1')
spark.sql('CREATE NAMESPACE db1.schema')
spark.sql('USE db1.schema')
spark.sql('CREATE TABLE iceberg_table (col1 int, col2 string)')

response = snowman_catalog_client.load_table(snowflake_catalog.name, unquote('db1%1Fschema'),
"iceberg_table",
"vended-credentials")
assert response.config is not None
assert 's3.access-key-id' in response.config
assert 's3.secret-access-key' in response.config
assert 's3.session-token' in response.config

s3 = boto3.client('s3',
aws_access_key_id=response.config['s3.access-key-id'],
aws_secret_access_key=response.config['s3.secret-access-key'],
aws_session_token=response.config['s3.session-token'])

# Get metadata files
objects = s3.list_objects(Bucket=test_bucket, Delimiter='/',
Prefix=f'{aws_bucket_base_location_prefix}/snowflake_catalog/db1/schema/iceberg_table/metadata/')
assert objects is not None
assert 'Contents' in objects
assert len(objects['Contents']) > 0

# Verify metadata content
metadata_file = next(f for f in objects['Contents'] if f['Key'].endswith('metadata.json'))
assert metadata_file is not None

metadata_contents = s3.get_object(Bucket=test_bucket, Key=metadata_file['Key'])
assert metadata_contents is not None
assert metadata_contents['ContentLength'] > 0

# Delete metadata files; delete_objects expects {'Key': ...} entries, not the raw list_objects response
s3.delete_objects(Bucket=test_bucket,
                  Delete={'Objects': [{'Key': obj['Key']} for obj in objects['Contents']]})

# Loading the table should now fail with a 404 because the metadata file is gone
with pytest.raises(Exception) as exc_info:
    snowman_catalog_client.load_table(snowflake_catalog.name, unquote('db1%1Fschema'),
                                      "iceberg_table",
                                      "vended-credentials")
assert '404' in str(exc_info.value)


with IcebergSparkSession(credentials=f'{snowman.principal.client_id}:{snowman.credentials.client_secret}',
catalog_name=snowflake_catalog.name,
polaris_url=polaris_catalog_url) as spark:
spark.sql(f'USE {snowflake_catalog.name}')
spark.sql('USE db1.schema')
spark.sql('DROP TABLE iceberg_table PURGE')
spark.sql(f'USE {snowflake_catalog.name}')
spark.sql('DROP NAMESPACE db1.schema')
spark.sql('DROP NAMESPACE db1')

def create_catalog_role(api, catalog, role_name):
catalog_role = CatalogRole(name=role_name)
try:
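For reference, the S3 calls in the test above follow boto3's standard shapes: list_objects returns object summaries under 'Contents', while delete_objects only accepts keys wrapped as {'Key': ...} entries. A minimal standalone sketch under those assumptions follows; the bucket, prefix, and credential values are placeholders, not values from the test fixtures.

import boto3

# Placeholder values; in the test these come from the loadTable response config
# and the pytest fixtures.
creds = {
    's3.access-key-id': '<access-key>',
    's3.secret-access-key': '<secret-key>',
    's3.session-token': '<session-token>',
}
bucket = 'example-bucket'
prefix = 'catalog/db1/schema/iceberg_table/metadata/'

s3 = boto3.client('s3',
                  aws_access_key_id=creds['s3.access-key-id'],
                  aws_secret_access_key=creds['s3.secret-access-key'],
                  aws_session_token=creds['s3.session-token'])

# List the metadata objects directly under the prefix, then delete them by key.
listing = s3.list_objects(Bucket=bucket, Delimiter='/', Prefix=prefix)
keys = [{'Key': obj['Key']} for obj in listing.get('Contents', [])]
if keys:
    s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})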
@@ -330,7 +330,7 @@ public Table registerTable(TableIdentifier identifier, String metadataFileLocati
Set.of(locationDir),
resolvedParent,
new HashMap<>(tableDefaultProperties),
Set.of(PolarisStorageActions.READ));
Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));

InputFile metadataFile = fileIO.newInputFile(metadataFileLocation);
TableMetadata metadata = TableMetadataParser.read(fileIO, metadataFile);
@@ -1238,7 +1238,7 @@ public void doRefresh() {
Set.of(latestLocationDir),
resolvedEntities,
new HashMap<>(tableDefaultProperties),
Set.of(PolarisStorageActions.READ));
Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));
return TableMetadataParser.read(fileIO, metadataLocation);
});
}
@@ -1274,7 +1274,10 @@ public void doCommit(TableMetadata base, TableMetadata metadata) {
getLocationsAllowedToBeAccessed(metadata),
resolvedStorageEntity,
new HashMap<>(metadata.properties()),
Set.of(PolarisStorageActions.READ, PolarisStorageActions.WRITE));
Set.of(
PolarisStorageActions.READ,
PolarisStorageActions.WRITE,
PolarisStorageActions.LIST));

List<PolarisEntity> resolvedNamespace =
resolvedTableEntities == null
@@ -1479,7 +1482,7 @@ public void doRefresh() {
Set.of(latestLocationDir),
resolvedEntities,
new HashMap<>(tableDefaultProperties),
Set.of(PolarisStorageActions.READ));
Set.of(PolarisStorageActions.READ, PolarisStorageActions.LIST));

return ViewMetadataParser.read(fileIO.newInputFile(metadataLocation));
});
@@ -2010,7 +2013,10 @@ private boolean sendNotificationForTableLike(
Set.of(locationDir),
resolvedParent,
new HashMap<>(tableDefaultProperties),
Set.of(PolarisStorageActions.READ, PolarisStorageActions.WRITE));
Set.of(
PolarisStorageActions.READ,
PolarisStorageActions.WRITE,
PolarisStorageActions.LIST));
TableMetadata tableMetadata = TableMetadataParser.read(fileIO, newLocation);

// then validate that it points to a valid location for this table
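The Java hunks above add PolarisStorageActions.LIST to the storage actions requested when FileIO is loaded with subscoped (vended) credentials, so the credentials handed back to clients can also list objects under the table location, which is exactly what the new test does against the metadata prefix. As a rough illustration of that idea only, here is a hypothetical mapping from requested storage actions to S3 permissions; this is a sketch, not Polaris's actual credential-scoping code, and the action names and permission lists are assumptions.

# Hypothetical sketch of mapping storage actions to S3 permissions for a
# scoped-down credential policy; not the actual Polaris implementation.
ACTION_TO_S3 = {
    'READ': ['s3:GetObject'],
    'WRITE': ['s3:PutObject'],
    'LIST': ['s3:ListBucket'],
    'DELETE': ['s3:DeleteObject'],
}

def s3_permissions_for(actions):
    """Collect the S3 permissions implied by a set of requested storage actions."""
    perms = []
    for action in actions:
        perms.extend(ACTION_TO_S3[action])
    return sorted(set(perms))

# Without LIST, vended credentials could read known keys but not enumerate a prefix.
print(s3_permissions_for({'READ', 'LIST'}))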