From 40de51279704d7bc722216085e4fa37492367651 Mon Sep 17 00:00:00 2001
From: nk1506
Date: Tue, 31 Oct 2023 15:42:37 +0530
Subject: [PATCH] Use JUnit5-based HiveMetastoreExtension in Hive tests, remove HiveMetastoreTest.

---
 .../hive/HiveCreateReplaceTableTest.java      |  23 +-
 .../iceberg/hive/HiveMetastoreExtension.java  |   4 +
 .../iceberg/hive/HiveMetastoreTest.java       |  89 ------
 .../iceberg/hive/HiveTableBaseTest.java       |  39 +--
 .../apache/iceberg/hive/HiveTableTest.java    |  36 +--
 .../iceberg/hive/TestCachedClientPool.java    |  26 +-
 .../apache/iceberg/hive/TestHiveCatalog.java  | 222 +-----------------
 .../iceberg/hive/TestHiveCommitLocks.java     |  50 +++-
 8 files changed, 136 insertions(+), 353 deletions(-)
 delete mode 100644 hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java

diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
index e46380191781..19f3dd9b7eca 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveCreateReplaceTableTest.java
@@ -25,7 +25,11 @@
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
 import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.PartitionSpec;
@@ -43,12 +47,14 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
-public class HiveCreateReplaceTableTest extends HiveMetastoreTest {
+public class HiveCreateReplaceTableTest {
+  private static final String DB_NAME = "hivedb";
   private static final String TABLE_NAME = "tbl";
   private static final TableIdentifier TABLE_IDENTIFIER = TableIdentifier.of(DB_NAME, TABLE_NAME);
   private static final Schema SCHEMA =
@@ -60,8 +66,23 @@ public class HiveCreateReplaceTableTest extends HiveMetastoreTest {
 
   private String tableLocation;
 
+  @RegisterExtension
+  public static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
+      new HiveMetastoreExtension(DB_NAME, Collections.emptyMap());
+
+  protected HiveCatalog catalog;
+
   @BeforeEach
   public void createTableLocation() throws IOException {
+    catalog =
+        (HiveCatalog)
+            CatalogUtil.loadCatalog(
+                HiveCatalog.class.getName(),
+                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+                ImmutableMap.of(
+                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+                    String.valueOf(TimeUnit.SECONDS.toMillis(10))),
+                HIVE_METASTORE_EXTENSION.hiveConf());
     tableLocation = temp.resolve("hive-").toString();
   }
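Every file below repeats the migration pattern visible in HiveCreateReplaceTableTest above: the shared HiveMetastoreTest superclass is dropped, a class-level HiveMetastoreExtension is registered instead, and each test gets a fresh HiveCatalog loaded against the extension's HiveConf. A minimal sketch of the resulting test-class shape (the class name and test body are illustrative only, not part of this patch):

    import static org.assertj.core.api.Assertions.assertThat;

    import java.util.Collections;
    import java.util.concurrent.TimeUnit;
    import org.apache.iceberg.CatalogProperties;
    import org.apache.iceberg.CatalogUtil;
    import org.apache.iceberg.catalog.Namespace;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.RegisterExtension;

    public class ExampleHiveTest {
      private static final String DB_NAME = "hivedb";

      // One embedded metastore per test class; the extension also creates DB_NAME in it.
      @RegisterExtension
      public static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
          new HiveMetastoreExtension(DB_NAME, Collections.emptyMap());

      private HiveCatalog catalog;

      @BeforeEach
      public void loadCatalog() {
        // Same call the patch adds to each @BeforeEach: a HiveCatalog wired to the
        // extension-managed metastore, with a 10s client-pool eviction interval.
        catalog =
            (HiveCatalog)
                CatalogUtil.loadCatalog(
                    HiveCatalog.class.getName(),
                    CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
                    ImmutableMap.of(
                        CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
                        String.valueOf(TimeUnit.SECONDS.toMillis(10))),
                    HIVE_METASTORE_EXTENSION.hiveConf());
      }

      @Test
      public void example() {
        // the catalog talks to the extension-managed metastore
        assertThat(catalog.listTables(Namespace.of(DB_NAME))).isEmpty();
      }
    }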
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java
index 255f7c7e0c72..e6f4a99cfcb0 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreExtension.java
@@ -77,4 +77,8 @@ public HiveMetaStoreClient metastoreClient() {
   public HiveConf hiveConf() {
     return metastore.hiveConf();
   }
+
+  public TestHiveMetastore metastore() {
+    return metastore;
+  }
 }
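Apart from the new metastore() accessor added above, the extension itself is untouched by this patch. For context, it packages the start/stop logic of the deleted HiveMetastoreTest (reproduced below) into JUnit5 class-level callbacks. An abridged sketch of its assumed shape, not the full source:

    import java.util.Map;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.iceberg.relocated.com.google.common.collect.Maps;
    import org.junit.jupiter.api.extension.AfterAllCallback;
    import org.junit.jupiter.api.extension.BeforeAllCallback;
    import org.junit.jupiter.api.extension.ExtensionContext;

    // Assumed shape (abridged): lifecycle mirrors the deleted HiveMetastoreTest below.
    public class HiveMetastoreExtension implements BeforeAllCallback, AfterAllCallback {
      private final String databaseName;
      private final Map<String, String> hiveConfOverride;
      private TestHiveMetastore metastore;
      private HiveMetaStoreClient metastoreClient;

      public HiveMetastoreExtension(String databaseName, Map<String, String> hiveConfOverride) {
        this.databaseName = databaseName;
        this.hiveConfOverride = hiveConfOverride;
      }

      @Override
      public void beforeAll(ExtensionContext context) throws Exception {
        metastore = new TestHiveMetastore();
        HiveConf conf = new HiveConf(TestHiveMetastore.class);
        hiveConfOverride.forEach(conf::set);
        metastore.start(conf); // embedded HMS, one per test class
        metastoreClient = new HiveMetaStoreClient(conf);
        String dbPath = metastore.getDatabasePath(databaseName);
        metastoreClient.createDatabase(
            new Database(databaseName, "description", dbPath, Maps.newHashMap()));
      }

      @Override
      public void afterAll(ExtensionContext context) throws Exception {
        metastoreClient.close();
        metastore.stop();
      }

      // plus the metastoreClient(), hiveConf(), and new metastore() accessors
    }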
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
deleted file mode 100644
index c82517cea02f..000000000000
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreTest.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iceberg.hive;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.iceberg.CatalogProperties;
-import org.apache.iceberg.CatalogUtil;
-import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
-import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-
-/*
- * This meta-setup has been deprecated; use {@link HiveMetastoreExtension} instead.
- * */
-@Deprecated
-public abstract class HiveMetastoreTest {
-
-  protected static final String DB_NAME = "hivedb";
-  protected static final long EVICTION_INTERVAL = TimeUnit.SECONDS.toMillis(10);
-
-  protected static HiveMetaStoreClient metastoreClient;
-  protected static HiveCatalog catalog;
-  protected static HiveConf hiveConf;
-  protected static TestHiveMetastore metastore;
-
-  @BeforeAll
-  public static void startMetastore() throws Exception {
-    startMetastore(Collections.emptyMap());
-  }
-
-  public static void startMetastore(Map<String, String> hiveConfOverride) throws Exception {
-    HiveMetastoreTest.metastore = new TestHiveMetastore();
-    HiveConf hiveConfWithOverrides = new HiveConf(TestHiveMetastore.class);
-    if (hiveConfOverride != null) {
-      for (Map.Entry<String, String> kv : hiveConfOverride.entrySet()) {
-        hiveConfWithOverrides.set(kv.getKey(), kv.getValue());
-      }
-    }
-
-    metastore.start(hiveConfWithOverrides);
-    HiveMetastoreTest.hiveConf = metastore.hiveConf();
-    HiveMetastoreTest.metastoreClient = new HiveMetaStoreClient(hiveConfWithOverrides);
-    String dbPath = metastore.getDatabasePath(DB_NAME);
-    Database db = new Database(DB_NAME, "description", dbPath, Maps.newHashMap());
-    metastoreClient.createDatabase(db);
-    HiveMetastoreTest.catalog =
-        (HiveCatalog)
-            CatalogUtil.loadCatalog(
-                HiveCatalog.class.getName(),
-                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
-                ImmutableMap.of(
-                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
-                    String.valueOf(EVICTION_INTERVAL)),
-                hiveConfWithOverrides);
-  }
-
-  @AfterAll
-  public static void stopMetastore() throws Exception {
-    HiveMetastoreTest.catalog = null;
-
-    metastoreClient.close();
-    HiveMetastoreTest.metastoreClient = null;
-
-    metastore.stop();
-    HiveMetastoreTest.metastore = null;
-  }
-}

diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
index 51f4b5953276..4ed58502f153 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableBaseTest.java
@@ -26,22 +26,34 @@
 import java.io.File;
 import java.nio.file.Paths;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import org.apache.hadoop.fs.Path;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.TableMetadataParser;
 import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.types.Types;
-import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
-public class HiveTableBaseTest extends HiveMetastoreTest {
+public class HiveTableBaseTest {
 
   static final String TABLE_NAME = "tbl";
+  static final String DB_NAME = "hivedb";
   static final TableIdentifier TABLE_IDENTIFIER = TableIdentifier.of(DB_NAME, TABLE_NAME);
 
+  @RegisterExtension
+  public static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
+      new HiveMetastoreExtension(DB_NAME, Collections.emptyMap());
+
+  protected HiveCatalog catalog;
+
   static final Schema schema =
       new Schema(Types.StructType.of(required(1, "id", Types.LongType.get())).fields());
 
@@ -54,23 +66,22 @@ public class HiveTableBaseTest extends HiveMetastoreTest {
   private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build();
 
-  private Path tableLocation;
-
   @BeforeEach
   public void createTestTable() {
-    this.tableLocation =
-        new Path(catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec).location());
-  }
-
-  @AfterEach
-  public void dropTestTable() throws Exception {
-    // drop the table data
-    tableLocation.getFileSystem(hiveConf).delete(tableLocation, true);
-    catalog.dropTable(TABLE_IDENTIFIER, false /* metadata only, location was already deleted */);
+    catalog =
+        (HiveCatalog)
+            CatalogUtil.loadCatalog(
+                HiveCatalog.class.getName(),
+                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+                ImmutableMap.of(
+                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+                    String.valueOf(TimeUnit.SECONDS.toMillis(10))),
+                HIVE_METASTORE_EXTENSION.hiveConf());
+    catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec);
   }
 
   private static String getTableBasePath(String tableName) {
-    String databasePath = metastore.getDatabasePath(DB_NAME);
+    String databasePath = HIVE_METASTORE_EXTENSION.metastore().getDatabasePath(DB_NAME);
     return Paths.get(databasePath, tableName).toAbsolutePath().toString();
   }

diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
index 0b5edf21aec7..0fa6c94bf154 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveTableTest.java
@@ -91,7 +91,9 @@ public void testCreate() throws TException {
     // Table should be renamed in hive metastore
     String tableName = TABLE_IDENTIFIER.name();
     org.apache.hadoop.hive.metastore.api.Table table =
-        metastoreClient.getTable(TABLE_IDENTIFIER.namespace().level(0), tableName);
+        HIVE_METASTORE_EXTENSION
+            .metastoreClient()
+            .getTable(TABLE_IDENTIFIER.namespace().level(0), tableName);
 
     // check parameters are in expected state
     Map<String, String> parameters = table.getParameters();
@@ -255,7 +257,7 @@ public void testExistingTableUpdate() throws TException {
     assertThat(icebergTable.schema().asStruct()).isEqualTo(altered.asStruct());
 
     final org.apache.hadoop.hive.metastore.api.Table table =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
     final List<String> hiveColumns =
         table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
     final List<String> icebergColumns =
@@ -309,10 +311,10 @@ public void testColumnTypeChangeInMetastore() throws TException {
   public void testFailure() throws TException {
     Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
     org.apache.hadoop.hive.metastore.api.Table table =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
     String dummyLocation = "dummylocation";
     table.getParameters().put(METADATA_LOCATION_PROP, dummyLocation);
-    metastoreClient.alter_table(DB_NAME, TABLE_NAME, table);
+    HIVE_METASTORE_EXTENSION.metastoreClient().alter_table(DB_NAME, TABLE_NAME, table);
     assertThatThrownBy(
             () -> icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit())
         .isInstanceOf(CommitFailedException.class)
@@ -333,7 +335,7 @@ public void testListTables() throws TException, IOException {
     // create a hive table
     String hiveTableName = "test_hive_table";
     org.apache.hadoop.hive.metastore.api.Table hiveTable = createHiveTable(hiveTableName);
-    metastoreClient.createTable(hiveTable);
+    HIVE_METASTORE_EXTENSION.metastoreClient().createTable(hiveTable);
 
     catalog.setListAllTables(false);
     List<TableIdentifier> tableIdents1 = catalog.listTables(TABLE_IDENTIFIER.namespace());
@@ -344,7 +346,7 @@
     assertThat(tableIdents2).as("should be 2 tables in namespace .").hasSize(2);
     assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isTrue();
 
-    metastoreClient.dropTable(DB_NAME, hiveTableName);
+    HIVE_METASTORE_EXTENSION.metastoreClient().dropTable(DB_NAME, hiveTableName);
   }
 
   private org.apache.hadoop.hive.metastore.api.Table createHiveTable(String hiveTableName)
@@ -410,13 +412,13 @@ public void testNonDefaultDatabaseLocation() throws IOException, TException {
     assertThat(table.location()).isEqualTo(namespaceMeta.get("location") + "/" + TABLE_NAME);
 
     // Drop the database and purge the files
-    metastoreClient.dropDatabase(NON_DEFAULT_DATABASE, true, true, true);
+    HIVE_METASTORE_EXTENSION.metastoreClient().dropDatabase(NON_DEFAULT_DATABASE, true, true, true);
   }
 
   @Test
   public void testRegisterTable() throws TException {
     org.apache.hadoop.hive.metastore.api.Table originalTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
     Map<String, String> originalParams = originalTable.getParameters();
     assertThat(originalParams).isNotNull();
 
@@ -432,7 +434,7 @@
     catalog.registerTable(TABLE_IDENTIFIER, "file:" + metadataVersionFiles.get(0));
 
     org.apache.hadoop.hive.metastore.api.Table newTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
     Map<String, String> newTableParameters = newTable.getParameters();
 
     assertThat(newTableParameters)
@@ -466,7 +468,7 @@ public void testRegisterHadoopTableToHiveCatalog() throws IOException, TExceptio
             .collect(Collectors.toList());
     assertThat(metadataFiles).hasSize(2);
 
-    assertThatThrownBy(() -> metastoreClient.getTable(DB_NAME, "table1"))
+    assertThatThrownBy(() -> HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, "table1"))
         .isInstanceOf(NoSuchObjectException.class)
         .hasMessage("hivedb.table1 table not found");
     assertThatThrownBy(() -> catalog.loadTable(identifier))
@@ -476,7 +478,7 @@
     // register the table to hive catalog using the latest metadata file
     String latestMetadataFile = ((BaseTable) table).operations().current().metadataFileLocation();
     catalog.registerTable(identifier, "file:" + latestMetadataFile);
-    assertThat(metastoreClient.getTable(DB_NAME, "table1")).isNotNull();
+    assertThat(HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, "table1")).isNotNull();
 
     // load the table in hive catalog
     table = catalog.loadTable(identifier);
@@ -523,7 +525,7 @@ private String appendData(Table table, String fileName) throws IOException {
   @Test
   public void testRegisterExistingTable() throws TException {
     org.apache.hadoop.hive.metastore.api.Table originalTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
     Map<String, String> originalParams = originalTable.getParameters();
     assertThat(originalParams).isNotNull();
 
@@ -550,7 +552,7 @@ public void testEngineHiveEnabledDefault() throws TException {
     catalog.createTable(TABLE_IDENTIFIER, schema, PartitionSpec.unpartitioned());
 
     org.apache.hadoop.hive.metastore.api.Table hmsTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
 
     assertHiveEnabled(hmsTable, false);
   }
@@ -565,7 +567,7 @@ public void testEngineHiveEnabledConfig() throws TException {
     catalog.createTable(TABLE_IDENTIFIER, schema, PartitionSpec.unpartitioned());
 
     org.apache.hadoop.hive.metastore.api.Table hmsTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
 
     assertHiveEnabled(hmsTable, true);
 
@@ -575,7 +577,7 @@
     catalog.getConf().set(ConfigProperties.ENGINE_HIVE_ENABLED, "false");
     catalog.createTable(TABLE_IDENTIFIER, schema, PartitionSpec.unpartitioned());
 
-    hmsTable = metastoreClient.getTable(DB_NAME, TABLE_NAME);
+    hmsTable = HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
 
     assertHiveEnabled(hmsTable, false);
   }
@@ -592,7 +594,7 @@ public void testEngineHiveEnabledTableProperty() throws TException {
     catalog.createTable(TABLE_IDENTIFIER, schema, PartitionSpec.unpartitioned(), tableProperties);
 
     org.apache.hadoop.hive.metastore.api.Table hmsTable =
-        metastoreClient.getTable(DB_NAME, TABLE_NAME);
+        HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
 
     assertHiveEnabled(hmsTable, true);
 
@@ -603,7 +605,7 @@
     catalog.getConf().set(ConfigProperties.ENGINE_HIVE_ENABLED, "true");
     catalog.createTable(TABLE_IDENTIFIER, schema, PartitionSpec.unpartitioned(), tableProperties);
 
-    hmsTable = metastoreClient.getTable(DB_NAME, TABLE_NAME);
+    hmsTable = HIVE_METASTORE_EXTENSION.metastoreClient().getTable(DB_NAME, TABLE_NAME);
 
     assertHiveEnabled(hmsTable, false);
   }

diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
index 19b9b0effbb4..9f451a3d5127 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestCachedClientPool.java
@@ -28,14 +28,38 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.exceptions.ValidationException;
 import org.apache.iceberg.hive.CachedClientPool.Key;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
-public class TestCachedClientPool extends HiveMetastoreTest {
+public class TestCachedClientPool {
+
+  private static final long EVICTION_INTERVAL = TimeUnit.SECONDS.toMillis(10);
+  private HiveConf hiveConf;
+  private static final String DB_NAME = "hivedb";
+
+  @RegisterExtension
+  public static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
+      new HiveMetastoreExtension(DB_NAME, Collections.emptyMap());
+
+  @BeforeEach
+  public void beforeEach() {
+    hiveConf = HIVE_METASTORE_EXTENSION.hiveConf();
+    CatalogUtil.loadCatalog(
+        HiveCatalog.class.getName(),
+        CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+        ImmutableMap.of(
+            CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+            String.valueOf(EVICTION_INTERVAL)),
+        hiveConf);
+  }
 
   @Test
   public void testClientPoolCleaner() throws InterruptedException {
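Two details of the TestCachedClientPool setup above are easy to miss. First, the result of CatalogUtil.loadCatalog(...) is discarded in beforeEach(); the call seemingly exists only to construct a catalog, and with it a cached client pool, under the configured eviction interval. Second, a JUnit5 @RegisterExtension field must be static for the extension to receive class-level callbacks, which is why every migrated class declares it public static final. A sketch of the distinction (illustrative, not from the patch):

    import java.util.Collections;
    import org.junit.jupiter.api.extension.RegisterExtension;

    public class StaticExtensionSketch {
      private static final String DB_NAME = "hivedb";

      // static field: beforeAll/afterAll run once per class, so one embedded
      // metastore serves every test method in the class
      @RegisterExtension
      public static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
          new HiveMetastoreExtension(DB_NAME, Collections.emptyMap());

      // An instance (non-static) field would be registered per test instance and
      // would only receive test-level callbacks, never beforeAll/afterAll.
    }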
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
index 43d52a294438..fb09cb3a0dc1 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
@@ -18,8 +18,6 @@
  */
 package org.apache.iceberg.hive;
 
-import static org.apache.iceberg.NullOrder.NULLS_FIRST;
-import static org.apache.iceberg.SortDirection.ASC;
 import static org.apache.iceberg.TableProperties.CURRENT_SCHEMA;
 import static org.apache.iceberg.TableProperties.CURRENT_SNAPSHOT_ID;
 import static org.apache.iceberg.TableProperties.CURRENT_SNAPSHOT_SUMMARY;
@@ -38,7 +36,6 @@
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
@@ -54,20 +51,15 @@
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.FileFormat;
-import org.apache.iceberg.HasTableOperations;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.PartitionSpecParser;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.SchemaParser;
 import org.apache.iceberg.Snapshot;
-import org.apache.iceberg.SortOrder;
-import org.apache.iceberg.SortOrderParser;
 import org.apache.iceberg.Table;
 import org.apache.iceberg.TableMetadata;
 import org.apache.iceberg.TableOperations;
 import org.apache.iceberg.TableProperties;
-import org.apache.iceberg.TestHelpers;
-import org.apache.iceberg.Transaction;
 import org.apache.iceberg.UpdateSchema;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.CatalogTests;
@@ -80,8 +72,6 @@
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.apache.iceberg.transforms.Transform;
-import org.apache.iceberg.transforms.Transforms;
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.JsonUtil;
 import org.apache.thrift.TException;
@@ -89,14 +79,9 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.RegisterExtension;
 import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
 
 /**
- * Run all the tests from abstract of {@link CatalogTests}. Also, a few specific tests for HIVE too.
- * There could be some duplicated tests that are already being covered with {@link CatalogTests}
- * //TODO: remove duplicate tests with {@link CatalogTests}.Also use the DB/TABLE/SCHEMA from {@link
- * CatalogTests}
+ * Runs all the tests from the abstract {@link CatalogTests} plus a few Hive-specific tests.
  */
 public class TestHiveCatalog extends CatalogTests<HiveCatalog> {
   private static ImmutableMap<String, String> meta =
@@ -153,38 +138,6 @@ private Schema getTestSchema() {
         required(2, "data", Types.StringType.get()));
   }
 
-  @Test
-  public void testCreateTableBuilder() throws Exception {
-    Schema schema = getTestSchema();
-    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.resolve("tbl").toString();
-
-    try {
-      Table table =
-          catalog
-              .buildTable(tableIdent, schema)
-              .withPartitionSpec(spec)
-              .withLocation(location)
-              .withProperty("key1", "value1")
-              .withProperty("key2", "value2")
-              .create();
-
-      assertThat(table.location()).isEqualTo(location);
-      assertThat(table.schema().columns()).hasSize(2);
-      assertThat(table.spec().fields()).hasSize(1);
-      assertThat(table.properties()).containsEntry("key1", "value1");
-      assertThat(table.properties()).containsEntry("key2", "value2");
-      // default Parquet compression is explicitly set for new tables
-      assertThat(table.properties())
-          .containsEntry(
-              TableProperties.PARQUET_COMPRESSION,
-              TableProperties.PARQUET_COMPRESSION_DEFAULT_SINCE_1_4_0);
-    } finally {
-      catalog.dropTable(tableIdent);
-    }
-  }
-
   @Test
   public void testCreateTableWithCaching() throws Exception {
     Schema schema = getTestSchema();
@@ -245,81 +198,6 @@ public void testInitializeCatalogWithProperties() {
         .isEqualTo("/user/hive/testwarehouse");
   }
 
-  @Test
-  public void testCreateTableTxnBuilder() throws Exception {
-    Schema schema = getTestSchema();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.resolve("tbl").toString();
-
-    try {
-      Transaction txn =
-          catalog.buildTable(tableIdent, schema).withLocation(location).createTransaction();
-      txn.commitTransaction();
-      Table table = catalog.loadTable(tableIdent);
-
-      assertThat(table.location()).isEqualTo(location);
-      assertThat(table.schema().columns()).hasSize(2);
-      assertThat(table.spec().isUnpartitioned()).isTrue();
-    } finally {
-      catalog.dropTable(tableIdent);
-    }
-  }
-
-  @ParameterizedTest
-  @ValueSource(ints = {1, 2})
-  public void testReplaceTxnBuilder(int formatVersion) {
-    Schema schema = getTestSchema();
-    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-    String location = temp.resolve("tbl").toString();
-
-    try {
-      Transaction createTxn =
-          catalog
-              .buildTable(tableIdent, schema)
-              .withPartitionSpec(spec)
-              .withLocation(location)
-              .withProperty("key1", "value1")
-              .withProperty(TableProperties.FORMAT_VERSION, String.valueOf(formatVersion))
-              .createOrReplaceTransaction();
-      createTxn.commitTransaction();
-
-      Table table = catalog.loadTable(tableIdent);
-      assertThat(table.spec().fields()).hasSize(1);
-
-      String newLocation = temp.resolve("tbl-2").toString();
-
-      Transaction replaceTxn =
-          catalog
-              .buildTable(tableIdent, schema)
-              .withProperty("key2", "value2")
-              .withLocation(newLocation)
-              .replaceTransaction();
-      replaceTxn.commitTransaction();
-
-      table = catalog.loadTable(tableIdent);
-      assertThat(table.location()).isEqualTo(newLocation);
-      assertThat(table.currentSnapshot()).isNull();
-      if (formatVersion == 1) {
-        PartitionSpec v1Expected =
-            PartitionSpec.builderFor(table.schema())
-                .alwaysNull("data", "data_bucket")
-                .withSpecId(1)
-                .build();
-        assertThat(table.spec())
-            .as("Table should have a spec with one void field")
-            .isEqualTo(v1Expected);
-      } else {
-        assertThat(table.spec().isUnpartitioned()).as("Table spec must be unpartitioned").isTrue();
-      }
-
-      assertThat(table.properties()).containsEntry("key1", "value1");
-      assertThat(table.properties()).containsEntry("key2", "value2");
-    } finally {
-      catalog.dropTable(tableIdent);
-    }
-  }
-
   @Test
   public void testCreateTableWithOwner() throws Exception {
     createTableAndVerifyOwner(
@@ -353,58 +231,6 @@ private void createTableAndVerifyOwner(
     }
   }
 
-  @Test
-  public void testCreateTableDefaultSortOrder() throws Exception {
-    Schema schema = getTestSchema();
-    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-
-    try {
-      Table table = catalog.createTable(tableIdent, schema, spec);
-      assertThat(table.sortOrder().orderId()).as("Order ID must match").isEqualTo(0);
-      assertThat(table.sortOrder().isUnsorted()).as("Order must unsorted").isTrue();
-
-      assertThat(hmsTableParameters())
-          .as("Must not have default sort order in catalog")
-          .doesNotContainKey(DEFAULT_SORT_ORDER);
-    } finally {
-      catalog.dropTable(tableIdent);
-    }
-  }
-
-  @Test
-  public void testCreateTableCustomSortOrder() throws Exception {
-    Schema schema = getTestSchema();
-    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    SortOrder order = SortOrder.builderFor(schema).asc("id", NULLS_FIRST).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
-
-    try {
-      Table table =
-          catalog
-              .buildTable(tableIdent, schema)
-              .withPartitionSpec(spec)
-              .withSortOrder(order)
-              .create();
-      SortOrder sortOrder = table.sortOrder();
-      assertThat(sortOrder.orderId()).as("Order ID must match").isEqualTo(1);
-      assertThat(sortOrder.fields()).as("Order must have 1 field").hasSize(1);
-      assertThat(sortOrder.fields().get(0).direction()).as("Direction must match ").isEqualTo(ASC);
-      assertThat(sortOrder.fields().get(0).nullOrder())
-          .as("Null order must match ")
-          .isEqualTo(NULLS_FIRST);
-      Transform<Integer, Integer> transform = Transforms.identity(Types.IntegerType.get());
-      assertThat(sortOrder.fields().get(0).transform())
-          .as("Transform must match")
-          .isEqualTo(transform);
-
-      assertThat(hmsTableParameters())
-          .containsEntry(DEFAULT_SORT_ORDER, SortOrderParser.toJson(table.sortOrder()));
-    } finally {
-      catalog.dropTable(tableIdent);
-    }
-  }
-
   @Test
   public void testDatabaseAndNamespaceWithLocation() throws Exception {
     Namespace namespace1 = Namespace.of("noLocation");
@@ -520,21 +346,6 @@ private void createNamespaceAndVerifyOwnership(
     assertThat(db.getOwnerType()).isEqualTo(expectedOwnerType);
   }
 
-  @Test
-  public void testListNamespace() throws TException {
-    List<Namespace> namespaces;
-    Namespace namespace1 = Namespace.of("dbname1");
-    catalog.createNamespace(namespace1, meta);
-    namespaces = catalog.listNamespaces(namespace1);
-    assertThat(namespaces).as("Hive db not hive the namespace 'dbname1'").isEmpty();
-
-    Namespace namespace2 = Namespace.of("dbname2");
-    catalog.createNamespace(namespace2, meta);
-    namespaces = catalog.listNamespaces();
-
-    assertThat(namespaces).as("Hive db not hive the namespace 'dbname2'").contains(namespace2);
-  }
-
   @Test
   public void testLoadNamespaceMeta() throws TException {
     Namespace namespace = Namespace.of("dbname_load");
@@ -1191,35 +1002,4 @@ public void testDatabaseLocationWithSlashInWarehouseDir() {
     assertThat(database.getLocationUri()).isEqualTo("s3://bucket/database.db");
   }
-
-  @Test
-  public void testRegisterTable() {
-    TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1");
-    catalog.createTable(identifier, getTestSchema());
-    Table registeringTable = catalog.loadTable(identifier);
-    catalog.dropTable(identifier, false);
-    TableOperations ops = ((HasTableOperations) registeringTable).operations();
-    String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation();
-    Table registeredTable = catalog.registerTable(identifier, metadataLocation);
-    assertThat(registeredTable).isNotNull();
-    TestHelpers.assertSerializedAndLoadedMetadata(registeringTable, registeredTable);
-    String expectedMetadataLocation =
-        ((HasTableOperations) registeredTable).operations().current().metadataFileLocation();
-    assertThat(metadataLocation).isEqualTo(expectedMetadataLocation);
-    assertThat(catalog.loadTable(identifier)).isNotNull();
-    assertThat(catalog.dropTable(identifier)).isTrue();
-  }
-
-  @Test
-  public void testRegisterExistingTable() {
-    TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1");
-    catalog.createTable(identifier, getTestSchema());
-    Table registeringTable = catalog.loadTable(identifier);
-    TableOperations ops = ((HasTableOperations) registeringTable).operations();
-    String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation();
-    assertThatThrownBy(() -> catalog.registerTable(identifier, metadataLocation))
-        .isInstanceOf(AlreadyExistsException.class)
-        .hasMessage("Table already exists: hivedb.t1");
-    assertThat(catalog.dropTable(identifier, true)).isTrue();
-  }
 }
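The large block of tests deleted above is not lost coverage: TestHiveCatalog already extends CatalogTests<HiveCatalog>, whose inherited suite exercises table builders, create/replace transactions, sort orders, namespace listing, and table registration. That is exactly the duplication the old class javadoc's TODO asked to remove. The subclass only supplies the plumbing, roughly as sketched below (catalog() is the abstract accessor CatalogTests is assumed to define; the exact overrides are not shown in this patch):

    public class TestHiveCatalog extends CatalogTests<HiveCatalog> {
      private static HiveCatalog catalog; // built from HIVE_METASTORE_EXTENSION.hiveConf()

      @Override
      protected HiveCatalog catalog() { // hook the inherited tests drive
        return catalog;
      }
    }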
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
index 2d0f3d23ad2a..0256e3a35b3e 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iceberg.hive;
 
+import static org.apache.iceberg.PartitionSpec.builderFor;
+import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.Mockito.any;
@@ -53,9 +55,14 @@
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
 import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
 import org.apache.iceberg.Table;
 import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.exceptions.CommitFailedException;
 import org.apache.iceberg.hadoop.ConfigProperties;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
@@ -63,15 +70,15 @@
 import org.apache.iceberg.types.Types;
 import org.apache.thrift.TException;
 import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.mockito.AdditionalAnswers;
 import org.mockito.ArgumentCaptor;
 import org.mockito.MockedStatic;
 import org.mockito.invocation.InvocationOnMock;
 
-public class TestHiveCommitLocks extends HiveTableBaseTest {
+public class TestHiveCommitLocks {
   private static HiveTableOperations spyOps = null;
   private static HiveClientPool spyClientPool = null;
   private static CachedClientPool spyCachedClientPool = null;
@@ -88,13 +95,33 @@ public class TestHiveCommitLocks extends HiveTableBaseTest {
   LockResponse notAcquiredLockResponse = new LockResponse(dummyLockId, LockState.NOT_ACQUIRED);
   ShowLocksResponse emptyLocks = new ShowLocksResponse(Lists.newArrayList());
 
-  @BeforeAll
-  public static void startMetastore() throws Exception {
-    HiveMetastoreTest.startMetastore(
-        ImmutableMap.of(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.varname, "1s"));
-
+  private static final String DB_NAME = "hivedb";
+  private static final String TABLE_NAME = "tbl";
+  private static final Schema schema =
+      new Schema(Types.StructType.of(required(1, "id", Types.LongType.get())).fields());
+  private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build();
+  static final TableIdentifier TABLE_IDENTIFIER = TableIdentifier.of(DB_NAME, TABLE_NAME);
+
+  @RegisterExtension
+  private static final HiveMetastoreExtension HIVE_METASTORE_EXTENSION =
+      new HiveMetastoreExtension(
+          DB_NAME, ImmutableMap.of(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.varname, "1s"));
+
+  private HiveCatalog catalog;
+
+  public void startMetastore() throws Exception {
+    this.catalog =
+        (HiveCatalog)
+            CatalogUtil.loadCatalog(
+                HiveCatalog.class.getName(),
+                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+                ImmutableMap.of(
+                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+                    String.valueOf(TimeUnit.SECONDS.toMillis(10))),
+                HIVE_METASTORE_EXTENSION.hiveConf());
+    catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec);
     // start spies
-    overriddenHiveConf = new Configuration(hiveConf);
+    overriddenHiveConf = new Configuration(HIVE_METASTORE_EXTENSION.hiveConf());
     overriddenHiveConf.setLong("iceberg.hive.lock-timeout-ms", 6 * 1000);
     overriddenHiveConf.setLong("iceberg.hive.lock-check-min-wait-ms", 50);
     overriddenHiveConf.setLong("iceberg.hive.lock-check-max-wait-ms", 5 * 1000);
@@ -107,14 +134,16 @@ public static void startMetastore() throws Exception {
         .thenAnswer(
             invocation -> {
               // cannot spy on RetryingHiveMetastoreClient as it is a proxy
-              IMetaStoreClient client = spy(new HiveMetaStoreClient(hiveConf));
+              IMetaStoreClient client =
+                  spy(new HiveMetaStoreClient(HIVE_METASTORE_EXTENSION.hiveConf()));
               spyClientRef.set(client);
               return spyClientRef.get();
             });
 
     spyClientPool.run(IMetaStoreClient::isLocalMetaStore); // To ensure new client is created.
 
-    spyCachedClientPool = spy(new CachedClientPool(hiveConf, Collections.emptyMap()));
+    spyCachedClientPool =
+        spy(new CachedClientPool(HIVE_METASTORE_EXTENSION.hiveConf(), Collections.emptyMap()));
     when(spyCachedClientPool.clientPool()).thenAnswer(invocation -> spyClientPool);
 
     assertThat(spyClientRef.get()).isNotNull();
@@ -124,6 +153,7 @@ public static void startMetastore() throws Exception {
 
   @BeforeEach
   public void before() throws Exception {
+    startMetastore();
     Table table = catalog.loadTable(TABLE_IDENTIFIER);
     ops = (HiveTableOperations) ((HasTableOperations) table).operations();
     String dbName = TABLE_IDENTIFIER.namespace().level(0);