@@ -39,6 +39,7 @@
 import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
 import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.common.DynConstructors;
 import org.apache.iceberg.common.DynMethods;
 import org.apache.iceberg.hadoop.Util;
@@ -193,6 +194,10 @@ public Table getTable(String dbName, String tableName) throws TException, InterruptedException {
     return clientPool.run(client -> client.getTable(dbName, tableName));
   }
 
+  public Table getTable(TableIdentifier identifier) throws TException, InterruptedException {
+    return getTable(identifier.namespace().toString(), identifier.name());
+  }
+
   private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf conf) throws Exception {
     HiveConf serverConf = new HiveConf(conf);
     serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + getDerbyPath() + ";create=true");
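Note: the new getTable(TableIdentifier) overload above is a thin convenience wrapper over the existing (dbName, tableName) variant. A minimal usage sketch, mirroring how the test added later in this PR calls it (the shell harness and its metastore() accessor come from the existing test utilities; the table name is illustrative):

// Usage sketch for the new overload; `shell` is the test shell used elsewhere
// in this PR and metastore() returns the embedded metastore helper modified above.
TableIdentifier identifier = TableIdentifier.of("default", "customers");
org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable(identifier);
String metadataLocation = hmsTable.getParameters()
    .get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);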
@@ -157,10 +157,18 @@ public void preDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) {
         "TRUE".equalsIgnoreCase(hmsTable.getParameters().get(InputFormatConfig.EXTERNAL_TABLE_PURGE));
 
     if (deleteIcebergTable && Catalogs.hiveCatalog(conf, catalogProperties)) {
-      // Store the metadata and the id for deleting the actual table data
-      String metadataLocation = hmsTable.getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
-      this.deleteIo = Catalogs.loadTable(conf, catalogProperties).io();
-      this.deleteMetadata = TableMetadataParser.read(deleteIo, metadataLocation);
+      // Store the metadata and the io for deleting the actual table data
+      try {
+        String metadataLocation = hmsTable.getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
+        this.deleteIo = Catalogs.loadTable(conf, catalogProperties).io();
+        this.deleteMetadata = TableMetadataParser.read(deleteIo, metadataLocation);
+      } catch (Exception e) {
+        LOG.error("preDropTable: Error while loading the Iceberg table or parsing its metadata for HMS table: {}.{}. " +
+            "In some cases, this might leave undeleted metadata files under the table directory: {}. " +
+            "Please double check and, if needed, manually delete any dangling files/folders. " +
+            "In spite of this error, the HMS table drop operation should proceed as normal.",
+            hmsTable.getDbName(), hmsTable.getTableName(), hmsTable.getSd().getLocation(), e);
+      }
     }
   }
@@ -178,7 +186,7 @@ public void commitDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable,
       Catalogs.dropTable(conf, catalogProperties);
     } else {
       // do nothing if metadata folder has been deleted already (Hive 4 behaviour for purge=TRUE)
-      if (deleteIo.newInputFile(deleteMetadata.location()).exists()) {
+      if (deleteMetadata != null && deleteIo.newInputFile(deleteMetadata.location()).exists()) {
         CatalogUtil.dropTableData(deleteIo, deleteMetadata);
       }
     }
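The two hunks above form a contract: preDropTable() now catches any failure while loading the Iceberg table or parsing its metadata, which can leave deleteIo/deleteMetadata unset, so commitDropTable() must null-check deleteMetadata before purging data. A condensed sketch of that interplay, using the field and method names from the diff (surrounding class members are assumed):

// In preDropTable(): a load/parse failure must not block the HMS drop.
try {
  this.deleteIo = Catalogs.loadTable(conf, catalogProperties).io();
  this.deleteMetadata = TableMetadataParser.read(deleteIo, metadataLocation);
} catch (Exception e) {
  // deleteMetadata stays null; only the data purge is skipped, not the drop itself
}

// In commitDropTable(): purge data only when the metadata could be read.
if (deleteMetadata != null && deleteIo.newInputFile(deleteMetadata.location()).exists()) {
  CatalogUtil.dropTableData(deleteIo, deleteMetadata);
}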
@@ -316,6 +316,34 @@ public void testDeleteBackingTable() throws TException, IOException, InterruptedException {
     }
   }
 
+  @Test
+  public void testDropTableWithCorruptedMetadata() throws TException, IOException, InterruptedException {
+    Assume.assumeTrue("Only HiveCatalog attempts to load the Iceberg table prior to dropping it.",
+        testTableType == TestTables.TestTableType.HIVE_CATALOG);
+
+    // create test table
+    TableIdentifier identifier = TableIdentifier.of("default", "customers");
+    testTables.createTable(shell, identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, FileFormat.PARQUET, ImmutableList.of());
+
+    // enable data purging (this should set external.table.purge=true on the HMS table)
+    Table table = testTables.loadTable(identifier);
+    table.updateProperties().set(GC_ENABLED, "true").commit();
+
+    // delete its current metadata file (i.e. corrupt the metadata to make the Iceberg table unloadable)
+    String metadataLocation = shell.metastore().getTable(identifier)
+        .getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
+    table.io().deleteFile(metadataLocation);
+
+    // check that the HMS table is nonetheless still droppable
+    shell.executeStatement(String.format("DROP TABLE %s", identifier));
+    AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class,
+        "Table does not exist", () -> {
+          testTables.loadTable(identifier);
+        }
+    );
+  }
+
   @Test
   public void testCreateTableError() {
     TableIdentifier identifier = TableIdentifier.of("default", "withShell2");
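For readers unfamiliar with the helper used in the new test: AssertHelpers.assertThrows(description, expectedType, messageFragment, callable) asserts both the exception type and that its message contains the given fragment. An equivalent plain formulation, assuming JUnit 4 (suggested by the @Test/Assume usage above):

// Equivalent assertion without AssertHelpers, assuming JUnit 4's Assert:
try {
  testTables.loadTable(identifier);
  Assert.fail("Expected NoSuchTableException");
} catch (NoSuchTableException e) {
  Assert.assertTrue(e.getMessage().contains("Table does not exist"));
}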