diff --git a/build.gradle b/build.gradle
index 679ffc6f2fc8..dfa02f22444d 100644
--- a/build.gradle
+++ b/build.gradle
@@ -719,6 +719,7 @@ project(':iceberg-hive-metastore') {
     }
 
     testImplementation project(path: ':iceberg-api', configuration: 'testArtifacts')
+    testImplementation project(path: ':iceberg-core', configuration: 'testArtifacts')
 
     testImplementation libs.awaitility
   }
 }
diff --git a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
index e01deebf5bc5..00f3739e2b9e 100644
--- a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
+++ b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
@@ -164,6 +164,10 @@ protected boolean supportsNamesWithSlashes() {
     return true;
   }
 
+  protected boolean supportsNamesWithDot() {
+    return true;
+  }
+
   @Test
   public void testCreateNamespace() {
     C catalog = catalog();
@@ -470,6 +474,8 @@ public void testNamespaceWithSlash() {
 
   @Test
   public void testNamespaceWithDot() {
+    Assumptions.assumeTrue(supportsNamesWithDot());
+
     C catalog = catalog();
 
     Namespace withDot = Namespace.of("new.db");
@@ -547,6 +553,8 @@ public void testTableNameWithSlash() {
 
   @Test
   public void testTableNameWithDot() {
+    Assumptions.assumeTrue(supportsNamesWithDot());
+
     C catalog = catalog();
 
     TableIdentifier ident = TableIdentifier.of("ns", "ta.ble");
diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
index 22f5b0b5cf37..33954c7f792c 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
@@ -261,6 +261,15 @@ public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new RuntimeException("Interrupted in call to rename", e);
+    } catch (RuntimeException e) {
+      // when the target table already exists, the Hive rename operation
+      // throws an exception like:
+      // java.lang.RuntimeException: InvalidOperationException(message:new table <> already exists)
+      if (e.getMessage().contains(String.format("new table %s already exists)", to))) {
+        throw new org.apache.iceberg.exceptions.AlreadyExistsException(
+            "Table already exists: %s", to);
+      }
+      throw new RuntimeException("Failed to rename " + from + " to " + to, e);
     }
   }
 
@@ -288,7 +297,7 @@ public void createNamespace(Namespace namespace, Map<String, String> meta) {
 
     } catch (AlreadyExistsException e) {
       throw new org.apache.iceberg.exceptions.AlreadyExistsException(
-          e, "Namespace '%s' already exists!", namespace);
+          e, "Namespace already exists: %s", namespace);
 
     } catch (TException e) {
       throw new RuntimeException(
@@ -500,6 +509,9 @@ protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
         return String.format("%s/%s", databaseData.getLocationUri(), tableIdentifier.name());
       }
 
+    } catch (NoSuchObjectException e) {
+      throw new NoSuchNamespaceException(
+          e, "Namespace does not exist: %s", tableIdentifier.namespace().levels()[0]);
     } catch (TException e) {
       throw new RuntimeException(
           String.format("Metastore operation failed for %s", tableIdentifier), e);
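
For illustration, a hypothetical test sketch (not part of this change) of the behavior the new catch block enables: renaming onto an existing table now surfaces Iceberg's typed AlreadyExistsException instead of a raw RuntimeException wrapping Hive's InvalidOperationException. The table names are assumptions.

package org.apache.iceberg.hive;

import static org.assertj.core.api.Assertions.assertThatThrownBy;

import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;

class RenameExistingTableSketch {
  // assumes both hivedb.source and hivedb.target already exist in the catalog
  void renameOntoExistingTable(HiveCatalog catalog) {
    TableIdentifier from = TableIdentifier.of("hivedb", "source");
    TableIdentifier to = TableIdentifier.of("hivedb", "target");
    assertThatThrownBy(() -> catalog.renameTable(from, to))
        .isInstanceOf(AlreadyExistsException.class)
        .hasMessage("Table already exists: hivedb.target");
  }
}
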
diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
index f4b96822d42c..34ef1a796106 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
@@ -217,7 +217,7 @@ protected void doCommit(TableMetadata base, TableMetadata metadata) {
       String baseMetadataLocation = base != null ? base.metadataFileLocation() : null;
       if (!Objects.equals(baseMetadataLocation, metadataLocation)) {
         throw new CommitFailedException(
-            "Base metadata location '%s' is not same as the current table metadata location '%s' for %s.%s",
+            "Cannot commit: base metadata location '%s' is not the same as the current table metadata location '%s' for %s.%s",
             baseMetadataLocation, metadataLocation, database, tableName);
       }
 
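
For context, a simplified paraphrase (not the actual implementation) of the optimistic-locking guard that produces the reworded message: a commit proceeds only when the metadata location it was based on still matches what the metastore currently records.

package org.apache.iceberg.hive;

import java.util.Objects;
import org.apache.iceberg.exceptions.CommitFailedException;

class CommitGuardSketch {
  static void checkMetadataLocation(
      String baseMetadataLocation, String currentLocation, String database, String tableName) {
    if (!Objects.equals(baseMetadataLocation, currentLocation)) {
      // another writer committed in between; callers may refresh table state and retry
      throw new CommitFailedException(
          "Cannot commit: base metadata location '%s' is not the same as the current table metadata location '%s' for %s.%s",
          baseMetadataLocation, currentLocation, database, tableName);
    }
  }
}
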
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java
new file mode 100644
index 000000000000..de54735232ab
--- /dev/null
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.hive;
+
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+public final class HiveMetastoreExtension implements AfterEachCallback, BeforeEachCallback {
+
+  HiveCatalog catalog;
+  HiveMetaStoreClient metastoreClient;
+  TestHiveMetastore metastore;
+  HiveConf hiveConf;
+  private final Map<String, String> hiveConfOverride;
+  static final String DB_NAME = "hivedb";
+
+  public HiveMetastoreExtension(Map<String, String> hiveConfOverride) {
+    this.hiveConfOverride = hiveConfOverride;
+  }
+
+  @Override
+  public void beforeEach(ExtensionContext extensionContext) throws Exception {
+    this.metastore = new TestHiveMetastore();
+    HiveConf hiveConfWithOverrides = new HiveConf(TestHiveMetastore.class);
+    if (hiveConfOverride != null) {
+      for (Map.Entry<String, String> kv : hiveConfOverride.entrySet()) {
+        hiveConfWithOverrides.set(kv.getKey(), kv.getValue());
+      }
+    }
+
+    metastore.start(hiveConfWithOverrides);
+    this.hiveConf = metastore.hiveConf();
+    this.metastoreClient = new HiveMetaStoreClient(hiveConfWithOverrides);
+
+    String dbPath = metastore.getDatabasePath(DB_NAME);
+    Database db = new Database(DB_NAME, "description", dbPath, Maps.newHashMap());
+    metastoreClient.createDatabase(db);
+
+    this.catalog =
+        (HiveCatalog)
+            CatalogUtil.loadCatalog(
+                HiveCatalog.class.getName(),
+                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+                ImmutableMap.of(
+                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+                    String.valueOf(TimeUnit.SECONDS.toMillis(10))),
+                hiveConfWithOverrides);
+  }
+
+  @Override
+  public void afterEach(ExtensionContext extensionContext) throws Exception {
+    this.catalog = null;
+    metastoreClient.close();
+    this.metastoreClient = null;
+    metastore.stop();
+    this.metastore = null;
+  }
+}
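
For reference, a minimal usage sketch (hypothetical test class; TestHiveCatalog below does the same wiring): the extension starts a fresh TestHiveMetastore before each test and tears it down afterwards, so tests no longer share static metastore state.

package org.apache.iceberg.hive;

import java.util.Collections;
import org.apache.iceberg.catalog.Namespace;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

class ExampleHiveTest {
  @RegisterExtension
  static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
      new HiveMetastoreExtension(Collections.emptyMap());

  @Test
  void listsPreCreatedDatabase() {
    // beforeEach pre-creates the "hivedb" database (HiveMetastoreExtension.DB_NAME)
    HIVE_METASTORE_EXTENSION.catalog.listTables(Namespace.of(HiveMetastoreExtension.DB_NAME));
  }
}
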
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
index e48df0ce9378..5c92a44eb429 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
@@ -31,6 +31,10 @@
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 
+/**
+ * This test base class has been deprecated; use {@link HiveMetastoreExtension} instead.
+ */
+@Deprecated
 public abstract class HiveMetastoreTest {
 
   protected static final String DB_NAME = "hivedb";
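
Migrating a subclass off the deprecated base class is mechanical, as the TestHiveCatalog changes below show; a schematic sketch with a hypothetical subclass name:

package org.apache.iceberg.hive;

import java.util.Collections;
import org.junit.jupiter.api.extension.RegisterExtension;

// Before (deprecated): extends HiveMetastoreTest and uses the inherited static
// fields catalog, metastoreClient, and hiveConf, shared across the whole class.
//
// After: per-test lifecycle through the JUnit 5 extension; the former static
// fields are reached through the extension instead:
//   catalog         -> HIVE_METASTORE_EXTENSION.catalog
//   metastoreClient -> HIVE_METASTORE_EXTENSION.metastoreClient
//   hiveConf        -> HIVE_METASTORE_EXTENSION.hiveConf
class MyMigratedHiveTest {
  @RegisterExtension
  static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
      new HiveMetastoreExtension(Collections.emptyMap());
}
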
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
index 7ff2bd78a665..82e8f5204e0f 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
@@ -28,6 +28,7 @@
 import static org.apache.iceberg.TableProperties.DEFAULT_SORT_ORDER;
 import static org.apache.iceberg.TableProperties.SNAPSHOT_COUNT;
 import static org.apache.iceberg.expressions.Expressions.bucket;
+import static org.apache.iceberg.hive.HiveMetastoreExtension.DB_NAME;
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatNoException;
@@ -37,6 +38,7 @@
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -68,6 +70,7 @@
 import org.apache.iceberg.Transaction;
 import org.apache.iceberg.UpdateSchema;
 import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.CatalogTests;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.exceptions.AlreadyExistsException;
@@ -83,11 +86,18 @@
 import org.apache.iceberg.util.JsonUtil;
 import org.apache.thrift.TException;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
-public class TestHiveCatalog extends HiveMetastoreTest {
+/**
+ * Runs all the tests from the abstract {@link CatalogTests} suite, plus some Hive-specific tests.
+ * Some of these may duplicate coverage already provided by {@link CatalogTests}.
+ * TODO: remove the tests duplicated by {@link CatalogTests}, and reuse the DB/TABLE/SCHEMA
+ * definitions from {@link CatalogTests}.
+ */
+public class TestHiveCatalog extends CatalogTests<HiveCatalog> {
   private static ImmutableMap<String, String> meta =
       ImmutableMap.of(
           "owner", "apache",
@@ -96,6 +106,30 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
   @TempDir private Path temp;
 
+  @RegisterExtension
+  public static final HiveMetastoreExtension hiveMetastoreExtension =
+      new HiveMetastoreExtension(Collections.emptyMap());
+
+  @Override
+  protected boolean requiresNamespaceCreate() {
+    return true;
+  }
+
+  @Override
+  protected boolean supportsNamesWithSlashes() {
+    return false;
+  }
+
+  @Override
+  protected boolean supportsNamesWithDot() {
+    return false;
+  }
+
+  @Override
+  protected HiveCatalog catalog() {
+    return hiveMetastoreExtension.catalog;
+  }
+
   private Schema getTestSchema() {
     return new Schema(
         required(1, "id", Types.IntegerType.get(), "unique ID"),
@@ -111,7 +145,8 @@ public void testCreateTableBuilder() throws Exception {
 
     try {
       Table table =
-          catalog
+          hiveMetastoreExtension
+              .catalog
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withLocation(location)
@@ -130,7 +165,7 @@ public void testCreateTableBuilder() throws Exception {
           TableProperties.PARQUET_COMPRESSION,
           TableProperties.PARQUET_COMPRESSION_DEFAULT_SINCE_1_4_0);
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
@@ -141,7 +176,7 @@ public void testCreateTableWithCaching() throws Exception {
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
     ImmutableMap<String, String> properties = ImmutableMap.of("key1", "value1", "key2", "value2");
-    Catalog cachingCatalog = CachingCatalog.wrap(catalog);
+    Catalog cachingCatalog = CachingCatalog.wrap(hiveMetastoreExtension.catalog);
 
     try {
       Table table = cachingCatalog.createTable(tableIdent, schema, spec, location, properties);
@@ -202,15 +237,19 @@ public void testCreateTableTxnBuilder() throws Exception {
 
     try {
       Transaction txn =
-          catalog.buildTable(tableIdent, schema).withLocation(location).createTransaction();
+          hiveMetastoreExtension
+              .catalog
+              .buildTable(tableIdent, schema)
+              .withLocation(location)
+              .createTransaction();
       txn.commitTransaction();
-      Table table = catalog.loadTable(tableIdent);
+      Table table = hiveMetastoreExtension.catalog.loadTable(tableIdent);
 
       assertThat(table.location()).isEqualTo(location);
       assertThat(table.schema().columns()).hasSize(2);
       assertThat(table.spec().isUnpartitioned()).isTrue();
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
@@ -224,7 +263,8 @@ public void testReplaceTxnBuilder(int formatVersion) {
 
     try {
       Transaction createTxn =
-          catalog
+          hiveMetastoreExtension
+              .catalog
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withLocation(location)
@@ -233,20 +273,21 @@ public void testReplaceTxnBuilder(int formatVersion) {
               .createOrReplaceTransaction();
       createTxn.commitTransaction();
 
-      Table table = catalog.loadTable(tableIdent);
+      Table table = hiveMetastoreExtension.catalog.loadTable(tableIdent);
       assertThat(table.spec().fields()).hasSize(1);
 
       String newLocation = temp.resolve("tbl-2").toString();
 
       Transaction replaceTxn =
-          catalog
+          hiveMetastoreExtension
+              .catalog
               .buildTable(tableIdent, schema)
               .withProperty("key2", "value2")
               .withLocation(newLocation)
              .replaceTransaction();
      replaceTxn.commitTransaction();
 
-      table = catalog.loadTable(tableIdent);
+      table = hiveMetastoreExtension.catalog.loadTable(tableIdent);
       assertThat(table.location()).isEqualTo(newLocation);
       assertThat(table.currentSnapshot()).isNull();
       if (formatVersion == 1) {
@@ -265,7 +306,7 @@ public void testReplaceTxnBuilder(int formatVersion) {
       assertThat(table.properties()).containsEntry("key1", "value1");
       assertThat(table.properties()).containsEntry("key2", "value2");
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
@@ -291,13 +332,16 @@ private void createTableAndVerifyOwner(
     TableIdentifier tableIdent = TableIdentifier.of(db, tbl);
     String location = temp.resolve(tbl).toString();
     try {
-      Table table = catalog.createTable(tableIdent, schema, spec, location, properties);
-      org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(db, tbl);
+      Table table =
+          hiveMetastoreExtension.catalog.createTable(
+              tableIdent, schema, spec, location, properties);
+      org.apache.hadoop.hive.metastore.api.Table hmsTable =
+          hiveMetastoreExtension.metastoreClient.getTable(db, tbl);
       assertThat(hmsTable.getOwner()).isEqualTo(owner);
       Map<String, String> hmsTableParams = hmsTable.getParameters();
       assertThat(hmsTableParams).doesNotContainKey(HiveCatalog.HMS_TABLE_OWNER);
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
@@ -308,7 +352,7 @@ public void testCreateTableDefaultSortOrder() throws Exception {
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
 
     try {
-      Table table = catalog.createTable(tableIdent, schema, spec);
+      Table table = hiveMetastoreExtension.catalog.createTable(tableIdent, schema, spec);
       assertThat(table.sortOrder().orderId()).as("Order ID must match").isEqualTo(0);
       assertThat(table.sortOrder().isUnsorted()).as("Order must unsorted").isTrue();
 
@@ -316,7 +360,7 @@ public void testCreateTableDefaultSortOrder() throws Exception {
       assertThat(hmsTableParameters())
           .as("Must not have default sort order in catalog")
           .doesNotContainKey(DEFAULT_SORT_ORDER);
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
@@ -329,7 +373,8 @@ public void testCreateTableCustomSortOrder() throws Exception {
 
     try {
       Table table =
-          catalog
+          hiveMetastoreExtension
+              .catalog
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withSortOrder(order)
@@ -349,26 +394,26 @@ public void testCreateTableCustomSortOrder() throws Exception {
       assertThat(hmsTableParameters())
           .containsEntry(DEFAULT_SORT_ORDER, SortOrderParser.toJson(table.sortOrder()));
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
   @Test
-  public void testCreateNamespace() throws Exception {
+  public void testDatabaseAndNamespaceWithLocation() throws Exception {
     Namespace namespace1 = Namespace.of("noLocation");
-    catalog.createNamespace(namespace1, meta);
-    Database database1 = metastoreClient.getDatabase(namespace1.toString());
+    hiveMetastoreExtension.catalog.createNamespace(namespace1, meta);
+    Database database1 = hiveMetastoreExtension.metastoreClient.getDatabase(namespace1.toString());
 
     assertThat(database1.getParameters()).containsEntry("owner", "apache");
     assertThat(database1.getParameters()).containsEntry("group", "iceberg");
 
     assertThat(defaultUri(namespace1))
-        .as("There no same location for db and namespace")
+        .as("Database and namespace don't have the same location")
         .isEqualTo(database1.getLocationUri());
 
-    assertThatThrownBy(() -> catalog.createNamespace(namespace1))
+    assertThatThrownBy(() -> hiveMetastoreExtension.catalog.createNamespace(namespace1))
         .isInstanceOf(AlreadyExistsException.class)
-        .hasMessage("Namespace '" + namespace1 + "' already exists!");
+        .hasMessage(String.format("Namespace already exists: %s", namespace1));
 
     String hiveLocalDir = temp.toFile().toURI().toString();
     // remove the trailing slash of the URI
     hiveLocalDir = hiveLocalDir.substring(0, hiveLocalDir.length() - 1);
@@ -379,10 +424,10 @@ public void testCreateNamespace() throws Exception {
             .buildOrThrow();
     Namespace namespace2 = Namespace.of("haveLocation");
 
-    catalog.createNamespace(namespace2, newMeta);
-    Database database2 = metastoreClient.getDatabase(namespace2.toString());
+    hiveMetastoreExtension.catalog.createNamespace(namespace2, newMeta);
+    Database database2 = hiveMetastoreExtension.metastoreClient.getDatabase(namespace2.toString());
     assertThat(hiveLocalDir)
-        .as("There no same location for db and namespace")
+        .as("Database and namespace don't have the same location")
         .isEqualTo(database2.getLocationUri());
   }
 
@@ -459,8 +504,8 @@ private void createNamespaceAndVerifyOwnership(
       throws TException {
     Namespace namespace = Namespace.of(name);
 
-    catalog.createNamespace(namespace, prop);
-    Database db = metastoreClient.getDatabase(namespace.toString());
+    hiveMetastoreExtension.catalog.createNamespace(namespace, prop);
+    Database db = hiveMetastoreExtension.metastoreClient.getDatabase(namespace.toString());
 
     assertThat(db.getOwnerName()).isEqualTo(expectedOwner);
     assertThat(db.getOwnerType()).isEqualTo(expectedOwnerType);
@@ -470,13 +515,13 @@ private void createNamespaceAndVerifyOwnership(
   public void testListNamespace() throws TException {
     List<Namespace> namespaces;
     Namespace namespace1 = Namespace.of("dbname1");
-    catalog.createNamespace(namespace1, meta);
-    namespaces = catalog.listNamespaces(namespace1);
+    hiveMetastoreExtension.catalog.createNamespace(namespace1, meta);
+    namespaces = hiveMetastoreExtension.catalog.listNamespaces(namespace1);
     assertThat(namespaces).as("Hive db not hive the namespace 'dbname1'").isEmpty();
 
     Namespace namespace2 = Namespace.of("dbname2");
-    catalog.createNamespace(namespace2, meta);
-    namespaces = catalog.listNamespaces();
+    hiveMetastoreExtension.catalog.createNamespace(namespace2, meta);
+    namespaces = hiveMetastoreExtension.catalog.listNamespaces();
     assertThat(namespaces).as("Hive db not hive the namespace 'dbname2'").contains(namespace2);
   }
 
@@ -485,12 +530,12 @@ public void testListNamespace() throws TException {
   public void testLoadNamespaceMeta() throws TException {
     Namespace namespace = Namespace.of("dbname_load");
 
-    catalog.createNamespace(namespace, meta);
+    hiveMetastoreExtension.catalog.createNamespace(namespace, meta);
 
-    Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
+    Map<String, String> nameMata = hiveMetastoreExtension.catalog.loadNamespaceMetadata(namespace);
     assertThat(nameMata).containsEntry("owner", "apache");
     assertThat(nameMata).containsEntry("group", "iceberg");
-    assertThat(catalog.convertToDatabase(namespace, meta).getLocationUri())
+    assertThat(hiveMetastoreExtension.catalog.convertToDatabase(namespace, meta).getLocationUri())
         .as("There no same location for db and namespace")
         .isEqualTo(nameMata.get("location"));
   }
 
@@ -499,38 +544,16 @@ public void testLoadNamespaceMeta() throws TException {
   public void testNamespaceExists() throws TException {
     Namespace namespace = Namespace.of("dbname_exists");
 
-    catalog.createNamespace(namespace, meta);
+    hiveMetastoreExtension.catalog.createNamespace(namespace, meta);
 
-    assertThat(catalog.namespaceExists(namespace)).as("Should true to namespace exist").isTrue();
-    assertThat(catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")))
+    assertThat(hiveMetastoreExtension.catalog.namespaceExists(namespace))
+        .as("Should true to namespace exist")
+        .isTrue();
+    assertThat(hiveMetastoreExtension.catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")))
         .as("Should false to namespace doesn't exist")
         .isFalse();
   }
 
-  @Test
-  public void testSetNamespaceProperties() throws TException {
-    Namespace namespace = Namespace.of("dbname_set");
-
-    catalog.createNamespace(namespace, meta);
-    catalog.setProperties(
-        namespace,
-        ImmutableMap.of(
-            "owner", "alter_apache",
-            "test", "test",
-            "location", "file:/data/tmp",
-            "comment", "iceberg test"));
-
-    Database database = metastoreClient.getDatabase(namespace.level(0));
-    assertThat(database.getParameters()).containsEntry("owner", "alter_apache");
-    assertThat(database.getParameters()).containsEntry("test", "test");
-    assertThat(database.getParameters()).containsEntry("group", "iceberg");
-
-    assertThatThrownBy(
-            () -> catalog.setProperties(Namespace.of("db2", "db2", "ns2"), ImmutableMap.of()))
-        .isInstanceOf(NoSuchNamespaceException.class)
-        .hasMessage("Namespace does not exist: db2.db2.ns2");
-  }
-
   @Test
   public void testSetNamespaceOwnership() throws TException {
     setNamespaceOwnershipAndVerify(
@@ -705,34 +728,13 @@ private void setNamespaceOwnershipAndVerify(
     createNamespaceAndVerifyOwnership(
         name, propToCreate, expectedOwnerPostCreate, expectedOwnerTypePostCreate);
 
-    catalog.setProperties(Namespace.of(name), propToSet);
-    Database database = metastoreClient.getDatabase(name);
+    hiveMetastoreExtension.catalog.setProperties(Namespace.of(name), propToSet);
+    Database database = hiveMetastoreExtension.metastoreClient.getDatabase(name);
 
     assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostSet);
     assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostSet);
   }
 
-  @Test
-  public void testRemoveNamespaceProperties() throws TException {
-    Namespace namespace = Namespace.of("dbname_remove");
-
-    catalog.createNamespace(namespace, meta);
-
-    catalog.removeProperties(namespace, ImmutableSet.of("comment", "owner"));
-
-    Database database = metastoreClient.getDatabase(namespace.level(0));
-
-    assertThat(database.getParameters()).doesNotContainKey("owner");
-    assertThat(database.getParameters()).containsEntry("group", "iceberg");
-
-    assertThatThrownBy(
-            () ->
-                catalog.removeProperties(
-                    Namespace.of("db2", "db2", "ns2"), ImmutableSet.of("comment", "owner")))
-        .isInstanceOf(NoSuchNamespaceException.class)
-        .hasMessage("Namespace does not exist: db2.db2.ns2");
-  }
-
   @Test
   public void testRemoveNamespaceOwnership() throws TException, IOException {
     removeNamespaceOwnershipAndVerify(
@@ -850,37 +852,38 @@ private void removeNamespaceOwnershipAndVerify(
     createNamespaceAndVerifyOwnership(
         name, propToCreate, expectedOwnerPostCreate, expectedOwnerTypePostCreate);
 
-    catalog.removeProperties(Namespace.of(name), propToRemove);
+    hiveMetastoreExtension.catalog.removeProperties(Namespace.of(name), propToRemove);
 
-    Database database = metastoreClient.getDatabase(name);
+    Database database = hiveMetastoreExtension.metastoreClient.getDatabase(name);
 
     assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostRemove);
     assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostRemove);
   }
 
   @Test
-  public void testDropNamespace() throws TException {
+  @Override
+  public void testDropNamespace() {
     Namespace namespace = Namespace.of("dbname_drop");
     TableIdentifier identifier = TableIdentifier.of(namespace, "table");
     Schema schema = getTestSchema();
 
-    catalog.createNamespace(namespace, meta);
-    catalog.createTable(identifier, schema);
-    Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
+    hiveMetastoreExtension.catalog.createNamespace(namespace, meta);
+    hiveMetastoreExtension.catalog.createTable(identifier, schema);
+    Map<String, String> nameMata = hiveMetastoreExtension.catalog.loadNamespaceMetadata(namespace);
     assertThat(nameMata).containsEntry("owner", "apache");
     assertThat(nameMata).containsEntry("group", "iceberg");
 
-    assertThatThrownBy(() -> catalog.dropNamespace(namespace))
+    assertThatThrownBy(() -> hiveMetastoreExtension.catalog.dropNamespace(namespace))
         .isInstanceOf(NamespaceNotEmptyException.class)
         .hasMessage("Namespace dbname_drop is not empty. One or more tables exist.");
-    assertThat(catalog.dropTable(identifier, true)).isTrue();
-    assertThat(catalog.dropNamespace(namespace))
+    assertThat(hiveMetastoreExtension.catalog.dropTable(identifier, true)).isTrue();
+    assertThat(hiveMetastoreExtension.catalog.dropNamespace(namespace))
         .as("Should fail to drop namespace if it is not empty")
         .isTrue();
-    assertThat(catalog.dropNamespace(Namespace.of("db.ns1")))
+    assertThat(hiveMetastoreExtension.catalog.dropNamespace(Namespace.of("db.ns1")))
         .as("Should fail to drop when namespace doesn't exist")
         .isFalse();
-    assertThatThrownBy(() -> catalog.loadNamespaceMetadata(namespace))
+    assertThatThrownBy(() -> hiveMetastoreExtension.catalog.loadNamespaceMetadata(namespace))
         .isInstanceOf(NoSuchNamespaceException.class)
         .hasMessage("Namespace does not exist: dbname_drop");
   }
 
@@ -889,12 +892,13 @@ public void testDropNamespace() throws TException {
   public void testDropTableWithoutMetadataFile() {
     TableIdentifier identifier = TableIdentifier.of(DB_NAME, "tbl");
     Schema tableSchema = getTestSchema();
-    catalog.createTable(identifier, tableSchema);
-    String metadataFileLocation = catalog.newTableOps(identifier).current().metadataFileLocation();
-    TableOperations ops = catalog.newTableOps(identifier);
+    hiveMetastoreExtension.catalog.createTable(identifier, tableSchema);
+    String metadataFileLocation =
+        hiveMetastoreExtension.catalog.newTableOps(identifier).current().metadataFileLocation();
+    TableOperations ops = hiveMetastoreExtension.catalog.newTableOps(identifier);
     ops.io().deleteFile(metadataFileLocation);
-    assertThat(catalog.dropTable(identifier)).isTrue();
-    assertThatThrownBy(() -> catalog.loadTable(identifier))
+    assertThat(hiveMetastoreExtension.catalog.dropTable(identifier)).isTrue();
+    assertThatThrownBy(() -> hiveMetastoreExtension.catalog.loadTable(identifier))
         .isInstanceOf(NoSuchTableException.class)
         .hasMessageContaining("Table does not exist:");
   }
 
@@ -906,23 +910,27 @@ public void testTableName() {
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
 
     try {
-      catalog.buildTable(tableIdent, schema).withPartitionSpec(spec).create();
+      hiveMetastoreExtension
+          .catalog
+          .buildTable(tableIdent, schema)
+          .withPartitionSpec(spec)
+          .create();
 
-      Table table = catalog.loadTable(tableIdent);
+      Table table = hiveMetastoreExtension.catalog.loadTable(tableIdent);
       assertThat(table.name()).as("Name must match").isEqualTo("hive.hivedb.tbl");
 
       TableIdentifier snapshotsTableIdent = TableIdentifier.of(DB_NAME, "tbl", "snapshots");
-      Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
+      Table snapshotsTable = hiveMetastoreExtension.catalog.loadTable(snapshotsTableIdent);
       assertThat(snapshotsTable.name())
           .as("Name must match")
           .isEqualTo("hive.hivedb.tbl.snapshots");
     } finally {
-      catalog.dropTable(tableIdent);
+      hiveMetastoreExtension.catalog.dropTable(tableIdent);
     }
   }
 
   private String defaultUri(Namespace namespace) throws TException {
-    return metastoreClient.getConfigValue("hive.metastore.warehouse.dir", "")
+    return hiveMetastoreExtension.metastoreClient.getConfigValue("hive.metastore.warehouse.dir", "")
         + "/"
         + namespace.level(0)
         + ".db";
@@ -935,11 +943,15 @@ public void testUUIDinTableProperties() throws Exception {
     String location = temp.resolve("tbl").toString();
 
     try {
-      catalog.buildTable(tableIdentifier, schema).withLocation(location).create();
+      hiveMetastoreExtension
+          .catalog
+          .buildTable(tableIdentifier, schema)
+          .withLocation(location)
+          .create();
 
       assertThat(hmsTableParameters()).containsKey(TableProperties.UUID);
     } finally {
-      catalog.dropTable(tableIdentifier);
+      hiveMetastoreExtension.catalog.dropTable(tableIdentifier);
     }
   }
 
@@ -950,7 +962,11 @@ public void testSnapshotStatsTableProperties() throws Exception {
     String location = temp.resolve("tbl").toString();
 
     try {
-      catalog.buildTable(tableIdentifier, schema).withLocation(location).create();
+      hiveMetastoreExtension
+          .catalog
+          .buildTable(tableIdentifier, schema)
+          .withLocation(location)
+          .create();
 
       // check whether parameters are in expected state
       Map<String, String> parameters = hmsTableParameters();
@@ -961,7 +977,7 @@ public void testSnapshotStatsTableProperties() throws Exception {
           .doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
 
       // create a snapshot
-      Table icebergTable = catalog.loadTable(tableIdentifier);
+      Table icebergTable = hiveMetastoreExtension.catalog.loadTable(tableIdentifier);
       String fileName = UUID.randomUUID().toString();
       DataFile file =
           DataFiles.builder(icebergTable.spec())
@@ -984,7 +1000,7 @@ public void testSnapshotStatsTableProperties() throws Exception {
               CURRENT_SNAPSHOT_TIMESTAMP,
               String.valueOf(icebergTable.currentSnapshot().timestampMillis()));
     } finally {
-      catalog.dropTable(tableIdentifier);
+      hiveMetastoreExtension.catalog.dropTable(tableIdentifier);
     }
   }
 
@@ -993,7 +1009,8 @@ public void testSetSnapshotSummary() throws Exception {
     Configuration conf = new Configuration();
     conf.set("iceberg.hive.table-property-max-size", "4000");
     HiveTableOperations ops =
-        new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
+        new HiveTableOperations(
+            conf, null, null, hiveMetastoreExtension.catalog.name(), DB_NAME, "tbl");
     Snapshot snapshot = mock(Snapshot.class);
     Map<String, String> summary = Maps.newHashMap();
     when(snapshot.summary()).thenReturn(summary);
@@ -1026,7 +1043,8 @@ public void testNotExposeTableProperties() {
     Configuration conf = new Configuration();
     conf.set("iceberg.hive.table-property-max-size", "0");
     HiveTableOperations ops =
-        new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
+        new HiveTableOperations(
+            conf, null, null, hiveMetastoreExtension.catalog.name(), DB_NAME, "tbl");
     TableMetadata metadata = mock(TableMetadata.class);
     Map<String, String> parameters = Maps.newHashMap();
     parameters.put(CURRENT_SNAPSHOT_SUMMARY, "summary");
@@ -1058,7 +1076,7 @@ public void testSetDefaultPartitionSpec() throws Exception {
     TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
 
     try {
-      Table table = catalog.buildTable(tableIdent, schema).create();
+      Table table = hiveMetastoreExtension.catalog.buildTable(tableIdent, schema).create();
       assertThat(hmsTableParameters())
          .as("Must not have default partition spec")
default partition spec") .doesNotContainKey(TableProperties.DEFAULT_PARTITION_SPEC); @@ -1068,7 +1086,7 @@ public void testSetDefaultPartitionSpec() throws Exception { .containsEntry( TableProperties.DEFAULT_PARTITION_SPEC, PartitionSpecParser.toJson(table.spec())); } finally { - catalog.dropTable(tableIdent); + hiveMetastoreExtension.catalog.dropTable(tableIdent); } } @@ -1078,7 +1096,7 @@ public void testSetCurrentSchema() throws Exception { TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl"); try { - Table table = catalog.buildTable(tableIdent, schema).create(); + Table table = hiveMetastoreExtension.catalog.buildTable(tableIdent, schema).create(); assertThat(hmsTableParameters()) .containsEntry(CURRENT_SCHEMA, SchemaParser.toJson(table.schema())); @@ -1093,12 +1111,13 @@ public void testSetCurrentSchema() throws Exception { assertThat(SchemaParser.toJson(table.schema()).length()).isGreaterThan(32672); assertThat(hmsTableParameters()).doesNotContainKey(CURRENT_SCHEMA); } finally { - catalog.dropTable(tableIdent); + hiveMetastoreExtension.catalog.dropTable(tableIdent); } } private Map hmsTableParameters() throws TException { - org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(DB_NAME, "tbl"); + org.apache.hadoop.hive.metastore.api.Table hmsTable = + hiveMetastoreExtension.metastoreClient.getTable(DB_NAME, "tbl"); return hmsTable.getParameters(); } @@ -1131,7 +1150,7 @@ public void testTablePropsDefinedAtCatalogLevel() { HiveCatalog.class.getName(), CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE, catalogProps, - hiveConf); + hiveMetastoreExtension.hiveConf); try { Table table = @@ -1183,31 +1202,33 @@ public void testDatabaseLocationWithSlashInWarehouseDir() { @Test public void testRegisterTable() { TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1"); - catalog.createTable(identifier, getTestSchema()); - Table registeringTable = catalog.loadTable(identifier); - catalog.dropTable(identifier, false); + hiveMetastoreExtension.catalog.createTable(identifier, getTestSchema()); + Table registeringTable = hiveMetastoreExtension.catalog.loadTable(identifier); + hiveMetastoreExtension.catalog.dropTable(identifier, false); TableOperations ops = ((HasTableOperations) registeringTable).operations(); String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation(); - Table registeredTable = catalog.registerTable(identifier, metadataLocation); + Table registeredTable = + hiveMetastoreExtension.catalog.registerTable(identifier, metadataLocation); assertThat(registeredTable).isNotNull(); TestHelpers.assertSerializedAndLoadedMetadata(registeringTable, registeredTable); String expectedMetadataLocation = ((HasTableOperations) registeredTable).operations().current().metadataFileLocation(); assertThat(metadataLocation).isEqualTo(expectedMetadataLocation); - assertThat(catalog.loadTable(identifier)).isNotNull(); - assertThat(catalog.dropTable(identifier)).isTrue(); + assertThat(hiveMetastoreExtension.catalog.loadTable(identifier)).isNotNull(); + assertThat(hiveMetastoreExtension.catalog.dropTable(identifier)).isTrue(); } @Test public void testRegisterExistingTable() { TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1"); - catalog.createTable(identifier, getTestSchema()); - Table registeringTable = catalog.loadTable(identifier); + hiveMetastoreExtension.catalog.createTable(identifier, getTestSchema()); + Table registeringTable = hiveMetastoreExtension.catalog.loadTable(identifier); TableOperations ops = ((HasTableOperations) 
     String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation();
-    assertThatThrownBy(() -> catalog.registerTable(identifier, metadataLocation))
+    assertThatThrownBy(
+            () -> hiveMetastoreExtension.catalog.registerTable(identifier, metadataLocation))
         .isInstanceOf(AlreadyExistsException.class)
         .hasMessage("Table already exists: hivedb.t1");
-    assertThat(catalog.dropTable(identifier, true)).isTrue();
+    assertThat(hiveMetastoreExtension.catalog.dropTable(identifier, true)).isTrue();
   }
 }
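
With the iceberg-core testArtifacts dependency added in build.gradle, the combined suite can be run on its own using Gradle's standard test filtering (command shown for illustration):

./gradlew :iceberg-hive-metastore:test --tests "org.apache.iceberg.hive.TestHiveCatalog"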