Date: Thu, 3 Jul 2025 17:45:35 +0200
Subject: [PATCH 07/27] HIVE-29016: revert to simpler cache but check that
cached table location is latest (Hive DB, get location) on loadTable()
ensuring no stale table is returned;
---
.../org/apache/iceberg/hive/HiveCatalog.java | 36 ++++++-
.../iceberg/rest/HMSCachingCatalog.java | 45 +++++----
.../iceberg/rest/HMSCatalogFactory.java | 20 +---
.../apache/iceberg/rest/HMSEventListener.java | 97 -------------------
.../hadoop/hive/metastore/HiveMetaStore.java | 25 -----
5 files changed, 59 insertions(+), 164 deletions(-)
delete mode 100644 standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
index 72280449ad54..ce30a647c74e 100644
--- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
+++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
@@ -21,6 +21,7 @@
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configurable;
@@ -31,6 +32,7 @@
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -408,23 +410,46 @@ private void validateTableIsIcebergTableOrView(
*/
@Override
public boolean tableExists(TableIdentifier identifier) {
+ return Objects.nonNull(fetchTable(identifier));
+ }
+
+ /**
+ * Check whether table or metadata table exists and return its metadata location.
+ *
+ * Note: If a non-Iceberg Hive table with the same identifier exists in the catalog, this method will return
+ * {@code null}.
+ *
+ * @param identifier a table identifier
+ * @return the metadata location of the table if it exists, null otherwise
+ */
+ public String getTableLocation(TableIdentifier identifier) {
+ Table table = fetchTable(identifier);
+ if (table == null) {
+ return null;
+ }
+ return table.getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
+ }
+
+ private Table fetchTable(TableIdentifier identifier) {
TableIdentifier baseTableIdentifier = identifier;
if (!isValidIdentifier(identifier)) {
if (!isValidMetadataIdentifier(identifier)) {
- return false;
+ return null;
} else {
baseTableIdentifier = TableIdentifier.of(identifier.namespace().levels());
}
}
-
String database = baseTableIdentifier.namespace().level(0);
String tableName = baseTableIdentifier.name();
try {
- Table table = clients.run(client -> client.getTable(database, tableName));
+ GetTableRequest request = new GetTableRequest();
+ request.setDbName(database);
+ request.setTblName(tableName);
+ Table table = clients.run(client -> client.getTable(request));
HiveOperationsBase.validateTableIsIceberg(table, fullTableName(name, baseTableIdentifier));
- return true;
+ return table;
} catch (NoSuchTableException | NoSuchObjectException e) {
- return false;
+ return null;
} catch (TException e) {
throw new RuntimeException("Failed to check table existence of " + baseTableIdentifier, e);
} catch (InterruptedException e) {
@@ -434,6 +459,7 @@ public boolean tableExists(TableIdentifier identifier) {
}
}
+
@Override
public boolean viewExists(TableIdentifier viewIdentifier) {
if (!isValidIdentifier(viewIdentifier)) {
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
index 79a899cec87a..14074da49df4 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
@@ -25,6 +25,7 @@
import java.util.Set;
import org.apache.iceberg.CachingCatalog;
import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
@@ -51,17 +52,6 @@ public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
this.hiveCatalog = catalog;
}
- public void invalidateTable(String dbName, String tableName) {
- super.invalidateTable(TableIdentifier.of(dbName, tableName));
- }
-
- @Override
- public void invalidateTable(TableIdentifier tableIdentifier) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Avoid invalidating table: {}", tableIdentifier);
- }
- }
-
@Override
public void createNamespace(Namespace nmspc, Map map) {
hiveCatalog.createNamespace(nmspc, map);
@@ -72,6 +62,27 @@ public List listNamespaces(Namespace nmspc) throws NoSuchNamespaceExc
return hiveCatalog.listNamespaces(nmspc);
}
+ @Override
+ public Table loadTable(TableIdentifier identifier) {
+ TableIdentifier canonicalIdentifier = identifier.toLowerCase();
+ Table cachedTable = tableCache.getIfPresent(canonicalIdentifier);
+ if (cachedTable != null) {
+ String location = hiveCatalog.getTableLocation(canonicalIdentifier);
+ if (location == null) {
+ LOG.debug("Table {} has no location, returning cached table without location", canonicalIdentifier);
+ } else if (!location.equals(cachedTable.location())) {
+ LOG.debug("Cached table {} has a different location than the one in the catalog: {} != {}",
+ canonicalIdentifier, cachedTable.location(), location);
+ } else {
+ LOG.debug("Returning cached table: {}", canonicalIdentifier);
+ return cachedTable;
+ }
+ // Stale entry (location missing or changed): evict and reload. NOTE(review): tableCache is keyed by TableIdentifier; invalidate(cachedTable) passes the Table value and likely evicts nothing — should this be invalidate(canonicalIdentifier)? Verify.
+ tableCache.invalidate(cachedTable);
+ }
+ return super.loadTable(identifier);
+ }
+
@Override
public Map loadNamespaceMetadata(Namespace nmspc) throws NoSuchNamespaceException {
return hiveCatalog.loadNamespaceMetadata(nmspc);
@@ -79,6 +90,10 @@ public Map loadNamespaceMetadata(Namespace nmspc) throws NoSuchN
@Override
public boolean dropNamespace(Namespace nmspc) throws NamespaceNotEmptyException {
+ List tables = listTables(nmspc);
+ for (TableIdentifier ident : tables) {
+ invalidateTable(ident);
+ }
return hiveCatalog.dropNamespace(nmspc);
}
@@ -102,14 +117,6 @@ public Catalog.TableBuilder buildTable(TableIdentifier identifier, Schema schema
return hiveCatalog.buildTable(identifier, schema);
}
-
- public void invalidateNamespace(String namespace) {
- Namespace ns = Namespace.of(namespace);
- for (TableIdentifier table : listTables(ns)) {
- invalidateTable(table);
- }
- }
-
@Override
public List listViews(Namespace namespace) {
return hiveCatalog.listViews(namespace);
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
index 7a4c28e5bb2b..508d17e533ed 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
@@ -18,11 +18,8 @@
*/
package org.apache.iceberg.rest;
-import java.lang.ref.Reference;
-import java.lang.ref.SoftReference;
import java.util.Map;
import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicReference;
import javax.servlet.http.HttpServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.ServletSecurity;
@@ -36,24 +33,12 @@
/**
* Catalog & servlet factory.
+ * This class is intentionally extensible: because the factory class name is a configuration property,
+ * this class can serve as a base for specialization.
*/
public class HMSCatalogFactory {
private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
- /**
- * Convenience soft reference to last catalog.
- */
- protected static final AtomicReference> catalogRef = new AtomicReference<>();
-
- public static Catalog getLastCatalog() {
- Reference soft = catalogRef.get();
- return soft != null ? soft.get() : null;
- }
-
- protected static void setLastCatalog(Catalog catalog) {
- catalogRef.set(new SoftReference<>(catalog));
- }
-
protected final Configuration configuration;
protected final int port;
protected final String path;
@@ -128,7 +113,6 @@ protected HttpServlet createServlet(Catalog catalog) {
protected HttpServlet createServlet() {
if (port >= 0 && path != null && !path.isEmpty()) {
Catalog actualCatalog = createCatalog();
- setLastCatalog(actualCatalog);
return createServlet(actualCatalog);
}
return null;
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
deleted file mode 100644
index 256b24ffe253..000000000000
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iceberg.rest;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.ReloadEvent;
-import org.apache.iceberg.catalog.Catalog;
-import org.slf4j.Logger;
-
-/**
- * IcebergEventListener is a Hive Metastore event listener that invalidates the cache
- * of the HMSCachingCatalog when certain events occur, such as altering or dropping a table.
- */
-public class HMSEventListener extends MetaStoreEventListener {
- private static final Logger LOG = org.slf4j.LoggerFactory.getLogger(HMSEventListener.class);
- /**
- * Constructor for HMSEventListener.
- *
- * @param config the configuration to use for the listener
- */
- public HMSEventListener(Configuration config) {
- super(config);
- }
-
-
- private Catalog getCatalog() {
- return HMSCatalogFactory.getLastCatalog();
- }
-
- @Override
- public void onAlterTable(AlterTableEvent event) {
- Catalog catalog = getCatalog();
- if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
- String dbName = event.getOldTable().getDbName();
- String tableName = event.getOldTable().getTableName();
- LOG.debug("onAlterTable: invalidating table cache for {}.{}", dbName, tableName);
- hmsCachingCatalog.invalidateTable(dbName, tableName);
- }
- }
-
- @Override
- public void onDropTable(DropTableEvent event) {
- Catalog catalog = getCatalog();
- if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
- String dbName = event.getTable().getDbName();
- String tableName = event.getTable().getTableName();
- LOG.debug("onDropTable: invalidating table cache for {}.{}", dbName, tableName);
- hmsCachingCatalog.invalidateTable(dbName, tableName);
- }
- }
-
- @Override
- public void onReload(ReloadEvent reloadEvent) {
- Catalog catalog = getCatalog();
- if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
- Table tableObj = reloadEvent.getTableObj();
- String dbName = tableObj.getDbName();
- String tableName = tableObj.getTableName();
- LOG.debug("onReload: invalidating table cache for {}.{}", dbName, tableName);
- hmsCachingCatalog.invalidateTable(dbName, tableName);
- }
- }
-
- @Override
- public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
- Catalog catalog = getCatalog();
- if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
- String dbName = dbEvent.getDatabase().getName();
- LOG.debug("onDropDatabase: invalidating tables cache for {}", dbName);
- hmsCachingCatalog.invalidateNamespace(dbName);
- }
- }
-
-}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 15ee7fabbebc..67e009aedc9b 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -670,30 +670,6 @@ private static void constraintHttpMethods(ServletContextHandler ctxHandler, bool
ctxHandler.setSecurityHandler(securityHandler);
}
- /**
- * Configure the metastore to propagate events to eventual Iceberg catalog.
- * @param conf the configuration
- */
- private static void configureIcebergCacheHandling(Configuration conf) {
- // If we start a REST catalog, we need to listen to events to maintain its consistency.
- String eventListenerClass = MetastoreConf.getVar(conf, ConfVars.ICEBERG_CATALOG_EVENT_LISTENER_CLASS);
- if (eventListenerClass != null && !eventListenerClass.isEmpty()) {
- // if expiry is negative, no cache is used, so no need to register the listener
- long expiry = MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY);
- // if the port is negative, no REST catalog is configured, so no need to register the listener
- int icebergPort = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT);
- if (icebergPort >= 0 && expiry > 0) {
- LOG.info("Configuring Iceberg catalog event listener: {}", eventListenerClass);
- String listeners = MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS);
- if (listeners == null || listeners.isEmpty()) {
- MetastoreConf.setVar(conf, ConfVars.EVENT_LISTENERS, eventListenerClass);
- } else {
- MetastoreConf.setVar(conf, ConfVars.EVENT_LISTENERS, listeners + "," + eventListenerClass);
- }
- }
- }
- }
-
/**
* Start Metastore based on a passed {@link HadoopThriftAuthBridge}.
*
@@ -709,7 +685,6 @@ private static void configureIcebergCacheHandling(Configuration conf) {
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge,
Configuration conf, boolean startMetaStoreThreads, AtomicBoolean startedBackgroundThreads) throws Throwable {
// If we start an Iceberg REST catalog, we need to listen to events to maintain its consistency.
- configureIcebergCacheHandling(conf);
isMetaStoreRemote = true;
String transportMode = MetastoreConf.getVar(conf, ConfVars.THRIFT_TRANSPORT_MODE, "binary");
boolean isHttpTransport = transportMode.equalsIgnoreCase("http");
From 7d611d3e15888093e2d0d18da0214d5addc9c2f2 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Tue, 8 Jul 2025 16:43:43 +0200
Subject: [PATCH 08/27] HIVE-29016: clean up;
---
.../hive/metastore/conf/MetastoreConf.java | 7 +-
.../org/apache/iceberg/rest/HMSTestBase.java | 452 ------------------
.../apache/iceberg/rest/TestHMSCatalog.java | 265 ----------
.../rest/extension/RESTCatalogServer.java | 1 -
.../hadoop/hive/metastore/HiveMetaStore.java | 1 -
5 files changed, 1 insertion(+), 725 deletions(-)
delete mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/HMSTestBase.java
delete mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCatalog.java
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 06026bf3f537..c491afd6e105 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -1884,14 +1884,9 @@ public enum ConfVars {
"HMS Iceberg Catalog servlet path component of URL endpoint."
),
ICEBERG_CATALOG_CACHE_EXPIRY("metastore.iceberg.catalog.cache.expiry",
- "hive.metastore.iceberg.catalog.cache.expiry", -1,
+ "hive.metastore.iceberg.catalog.cache.expiry", 600_000L,
"HMS Iceberg Catalog cache expiry."
),
- ICEBERG_CATALOG_EVENT_LISTENER_CLASS("hive.metastore.catalog.event.listener.class",
- "hive.metastore.catalog.event.listener.class",
- "org.apache.iceberg.rest.HMSEventListener",
- "HMS Iceberg Catalog event listener class name."
- ),
HTTPSERVER_THREADPOOL_MIN("hive.metastore.httpserver.threadpool.min",
"hive.metastore.httpserver.threadpool.min", 8,
"HMS embedded HTTP server minimum number of threads."
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/HMSTestBase.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/HMSTestBase.java
deleted file mode 100644
index 696d0a071b5a..000000000000
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/HMSTestBase.java
+++ /dev/null
@@ -1,452 +0,0 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one
-// * or more contributor license agreements. See the NOTICE file
-// * distributed with this work for additional information
-// * regarding copyright ownership. The ASF licenses this file
-// * to you under the Apache License, Version 2.0 (the
-// * "License"); you may not use this file except in compliance
-// * with the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing,
-// * software distributed under the License is distributed on an
-// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// * KIND, either express or implied. See the License for the
-// * specific language governing permissions and limitations
-// * under the License.
-// */
-//
-//package org.apache.iceberg.rest;
-//
-//import com.codahale.metrics.Counter;
-//import com.codahale.metrics.MetricRegistry;
-//import static com.github.tomakehurst.wiremock.client.WireMock.get;
-//import static com.github.tomakehurst.wiremock.client.WireMock.ok;
-//import com.github.tomakehurst.wiremock.junit.WireMockRule;
-//import com.nimbusds.jose.JWSAlgorithm;
-//import com.nimbusds.jose.JWSHeader;
-//import com.nimbusds.jose.JWSSigner;
-//import com.nimbusds.jose.crypto.RSASSASigner;
-//import com.nimbusds.jose.jwk.RSAKey;
-//import com.nimbusds.jwt.JWTClaimsSet;
-//import com.nimbusds.jwt.SignedJWT;
-//import java.io.BufferedReader;
-//import java.io.DataOutputStream;
-//import java.io.File;
-//import java.io.IOException;
-//import java.io.InputStream;
-//import java.io.InputStreamReader;
-//import java.io.Reader;
-//import java.net.HttpURLConnection;
-//import java.net.URL;
-//import java.nio.charset.StandardCharsets;
-//import java.nio.file.Files;
-//import java.nio.file.Path;
-//import java.util.Collections;
-//import java.util.Date;
-//import java.util.HashMap;
-//import java.util.LinkedHashMap;
-//import java.util.List;
-//import java.util.Map;
-//import java.util.Random;
-//import java.util.UUID;
-//import java.util.concurrent.TimeUnit;
-//import javax.servlet.http.HttpServletResponse;
-//import org.apache.commons.jexl3.JexlBuilder;
-//import org.apache.commons.jexl3.JexlContext;
-//import org.apache.commons.jexl3.JexlEngine;
-//import org.apache.commons.jexl3.JexlException;
-//import org.apache.commons.jexl3.JexlFeatures;
-//import org.apache.commons.jexl3.MapContext;
-//import org.apache.commons.jexl3.introspection.JexlPermissions;
-//import org.apache.hadoop.conf.Configuration;
-//import org.apache.hadoop.hive.conf.HiveConf;
-//import org.apache.hadoop.hive.metastore.HiveMetaException;
-//import org.apache.hadoop.hive.metastore.HiveMetaStore;
-//import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-//import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
-//import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-//import org.apache.hadoop.hive.metastore.ObjectStore;
-//import org.apache.hadoop.hive.metastore.api.Database;
-//import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-//import org.apache.hadoop.hive.metastore.metrics.Metrics;
-//import org.apache.hadoop.hive.metastore.properties.HMSPropertyManager;
-//import org.apache.hadoop.hive.metastore.properties.PropertyManager;
-//import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-//import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-//import org.apache.hive.iceberg.com.fasterxml.jackson.core.JsonProcessingException;
-//import org.apache.hive.iceberg.com.fasterxml.jackson.core.type.TypeReference;
-//import org.apache.hive.iceberg.com.fasterxml.jackson.databind.ObjectMapper;
-//import org.apache.iceberg.catalog.Catalog;
-//import org.apache.iceberg.catalog.SupportsNamespaces;
-//import org.apache.iceberg.hive.IcebergTestHelper;
-//import org.eclipse.jetty.server.Server;
-//import org.junit.After;
-//import org.junit.Assert;
-//import org.junit.Before;
-//import org.junit.ClassRule;
-//import org.junit.Rule;
-//import org.junit.rules.TemporaryFolder;
-//import org.slf4j.Logger;
-//import org.slf4j.LoggerFactory;
-//
-//public abstract class HMSTestBase {
-// protected static final Logger LOG = LoggerFactory.getLogger(HMSTestBase.class.getName());
-// protected static final String BASE_DIR = System.getProperty("basedir");
-// protected static Random RND = new Random(20230922);
-// protected static final String USER_1 = "USER_1";
-// protected static final String DB_NAME = "hivedb";
-// /** A Jexl engine for convenience. */
-// static final JexlEngine JEXL;
-// static {
-// JexlFeatures features = new JexlFeatures()
-// .sideEffect(false)
-// .sideEffectGlobal(false);
-// JexlPermissions p = JexlPermissions.RESTRICTED
-// .compose("org.apache.hadoop.hive.metastore.*", "org.apache.iceberg.*");
-// JEXL = new JexlBuilder()
-// .features(features)
-// .permissions(p)
-// .create();
-// }
-//
-// protected static final long EVICTION_INTERVAL = TimeUnit.SECONDS.toMillis(10);
-// private static final File JWT_AUTHKEY_FILE =
-// new File(BASE_DIR,"src/test/resources/auth/jwt/jwt-authorized-key.json");
-// protected static final File JWT_NOAUTHKEY_FILE =
-// new File(BASE_DIR,"src/test/resources/auth/jwt/jwt-unauthorized-key.json");
-// protected static final File JWT_JWKS_FILE =
-// new File(BASE_DIR,"src/test/resources/auth/jwt/jwt-verification-jwks.json");
-// protected static final int MOCK_JWKS_SERVER_PORT = 8089;
-// @ClassRule
-// public static final WireMockRule MOCK_JWKS_SERVER = new WireMockRule(MOCK_JWKS_SERVER_PORT);
-//
-//
-// public static class TestSchemaInfo extends MetaStoreSchemaInfo {
-// public TestSchemaInfo(String metastoreHome, String dbType) throws HiveMetaException {
-// super(metastoreHome, dbType);
-// }
-// @Override
-// public String getMetaStoreScriptDir() {
-// return new File(BASE_DIR, "../metastore-server/src/main/sql/derby").getAbsolutePath();
-// }
-// }
-//
-// @Rule
-// public TemporaryFolder temp = new TemporaryFolder();
-//
-// protected Configuration conf = null;
-// protected String NS = "hms" + RND.nextInt(100);
-//
-// protected int port = -1;
-// protected int catalogPort = -1;
-// protected final String catalogPath = "hmscatalog";
-// protected static final int WAIT_FOR_SERVER = 5000;
-// // for direct calls
-// protected Catalog catalog;
-// protected SupportsNamespaces nsCatalog;
-//
-// protected int createMetastoreServer(Configuration conf) throws Exception {
-// return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
-// }
-//
-// protected void stopMetastoreServer(int port) {
-// MetaStoreTestUtils.close(port);
-// }
-//
-// @Before
-// public void setUp() throws Exception {
-// NS = "hms" + RND.nextInt(100);
-// conf = MetastoreConf.newMetastoreConf();
-// MetaStoreTestUtils.setConfForStandloneMode(conf);
-// MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY, 60_000L);
-// MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.CAPABILITY_CHECK, false);
-// MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
-// // new 2024-10-02
-// MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.SCHEMA_VERIFICATION, false);
-//
-// conf.setBoolean(MetastoreConf.ConfVars.METRICS_ENABLED.getVarname(), true);
-// // "hive.metastore.warehouse.dir"
-// String whpath = new File(BASE_DIR,"target/tmp/warehouse/managed").toURI().toString();
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE, whpath);
-// HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_WAREHOUSE, whpath);
-// // "hive.metastore.warehouse.external.dir"
-// String extwhpath = new File(BASE_DIR,"target/tmp/warehouse/external").toURI().toString();
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL, extwhpath);
-// conf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, extwhpath);
-//
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.SCHEMA_INFO_CLASS, "org.apache.iceberg.rest.HMSTestBase$TestSchemaInfo");
-// // Events that get cleaned happen in batches of 1 to exercise batching code
-// MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.EVENT_CLEAN_MAX_EVENTS, 1L);
-// MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_SERVLET_PORT, 0);
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_SERVLET_AUTH, "jwt");
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_SERVLET_PATH, catalogPath);
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_METASTORE_AUTHENTICATION_JWT_JWKS_URL,
-// "http://localhost:" + MOCK_JWKS_SERVER_PORT + "/jwks");
-// MOCK_JWKS_SERVER.stubFor(get("/jwks")
-// .willReturn(ok()
-// .withBody(Files.readAllBytes(JWT_JWKS_FILE.toPath()))));
-// Metrics.initialize(conf);
-// // The server
-// port = createMetastoreServer(conf);
-// System.out.println("Starting MetaStore Server on port " + port);
-// // The manager decl
-// PropertyManager.declare(NS, HMSPropertyManager.class);
-// // The client
-// HiveMetaStoreClient client = createClient(conf);
-// Assert.assertNotNull("Unable to connect to the MetaStore server", client);
-//
-// // create a managed root
-// String location = temp.newFolder("hivedb2023").getAbsolutePath();
-// Database db = new Database(DB_NAME, "catalog test", location, Collections.emptyMap());
-// client.createDatabase(db);
-//
-// Catalog ice = acquireServer();
-// catalog = ice;
-// nsCatalog = catalog instanceof SupportsNamespaces? (SupportsNamespaces) catalog : null;
-// catalogPort = HiveMetaStore.getCatalogServletPort();
-// }
-//
-// private static String format(String format, Object... params) {
-// return org.slf4j.helpers.MessageFormatter.arrayFormat(format, params).getMessage();
-// }
-//
-// private static Catalog acquireServer() throws InterruptedException {
-// final int wait = 200;
-// Server iceServer = HiveMetaStore.getServletServer();
-// int tries = WAIT_FOR_SERVER / wait;
-// while(iceServer == null && tries-- > 0) {
-// Thread.sleep(wait);
-// iceServer = HiveMetaStore.getServletServer();
-// }
-// if (iceServer != null) {
-// boolean starting;
-// tries = WAIT_FOR_SERVER / wait;
-// while((starting = iceServer.isStarting()) && tries-- > 0) {
-// Thread.sleep(wait);
-// }
-// if (starting) {
-// LOG.warn("server still starting after {}ms", WAIT_FOR_SERVER);
-// }
-// Catalog ice = HMSCatalogFactory.getLastCatalog();
-// if (ice == null) {
-// throw new NullPointerException(format("unable to acquire catalog after {}ms", WAIT_FOR_SERVER));
-// }
-// return ice;
-// } else {
-// throw new NullPointerException(format("unable to acquire server after {}ms", WAIT_FOR_SERVER));
-// }
-// }
-//
-// protected HiveMetaStoreClient createClient(Configuration conf) throws Exception {
-// MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "");
-// MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EXECUTE_SET_UGI, false);
-// return new HiveMetaStoreClient(conf);
-// }
-//
-// /**
-// * @param apis a list of api calls
-// * @return the map of HMSCatalog route counter metrics keyed by their names
-// */
-// static Map reportMetricCounters(String... apis) {
-// Map map = new LinkedHashMap<>();
-// MetricRegistry registry = Metrics.getRegistry();
-// List names = HMSCatalogAdapter.getMetricNames(apis);
-// for(String name : names) {
-// Counter counter = registry.counter(name);
-// if (counter != null) {
-// long count = counter.getCount();
-// map.put(name, count);
-// }
-// }
-// return map;
-// }
-//
-// @After
-// public void tearDown() throws Exception {
-// try {
-// if (port >= 0) {
-// System.out.println("Stopping MetaStore Server on port " + port);
-// stopMetastoreServer(port);
-// port = -1;
-// }
-// // Clear the SSL system properties before each test.
-// System.clearProperty(ObjectStore.TRUSTSTORE_PATH_KEY);
-// System.clearProperty(ObjectStore.TRUSTSTORE_PASSWORD_KEY);
-// System.clearProperty(ObjectStore.TRUSTSTORE_TYPE_KEY);
-// //
-// IcebergTestHelper.invalidatePoolCache();
-// } finally {
-// catalog = null;
-// nsCatalog = null;
-// catalogPort = -1;
-// conf = null;
-// }
-// }
-//
-// protected String generateJWT() throws Exception {
-// return generateJWT(JWT_AUTHKEY_FILE.toPath());
-// }
-// protected String generateJWT(Path path) throws Exception {
-// return generateJWT(USER_1, path, TimeUnit.MINUTES.toMillis(5));
-// }
-//
-// private static String generateJWT(String user, Path keyFile, long lifeTimeMillis) throws Exception {
-// RSAKey rsaKeyPair = RSAKey.parse(new String(java.nio.file.Files.readAllBytes(keyFile), StandardCharsets.UTF_8));
-// // Create RSA-signer with the private key
-// JWSSigner signer = new RSASSASigner(rsaKeyPair);
-// JWSHeader header = new JWSHeader
-// .Builder(JWSAlgorithm.RS256)
-// .keyID(rsaKeyPair.getKeyID())
-// .build();
-// Date now = new Date();
-// Date expirationTime = new Date(now.getTime() + lifeTimeMillis);
-// JWTClaimsSet claimsSet = new JWTClaimsSet.Builder()
-// .jwtID(UUID.randomUUID().toString())
-// .issueTime(now)
-// .issuer("auth-server")
-// .subject(user)
-// .expirationTime(expirationTime)
-// .claim("custom-claim-or-payload", "custom-claim-or-payload")
-// .build();
-// SignedJWT signedJWT = new SignedJWT(header, claimsSet);
-// // Compute the RSA signature
-// signedJWT.sign(signer);
-// return signedJWT.serialize();
-// }
-//
-// /**
-// * Performs a Json client call.
-// * @param jwt the jwt token
-// * @param url the url
-// * @param method the http method
-// * @param arg the argument that will be transported as JSon
-// * @return the result the was returned through Json
-// * @throws IOException if marshalling the request/response fail
-// */
-// public static Object clientCall(String jwt, URL url, String method, Object arg) throws IOException {
-// return clientCall(jwt, url, method, true, arg);
-// }
-//
-// public static class ServerResponse {
-// private final int code;
-// private final String content;
-// public ServerResponse(int code, String content) {
-// this.code = code;
-// this.content = content;
-// }
-// }
-//
-// /**
-// * Performs an http client call.
-// * @param jwt a JWT bearer token (can be null)
-// * @param url the url to call
-// * @param method the http method to use
-// * @param json whether the call is application/json (true) or application/x-www-form-urlencoded (false)
-// * @param arg the query argument
-// * @return the (JSON) response
-// * @throws IOException
-// */
-// public static Object clientCall(String jwt, URL url, String method, boolean json, Object arg) throws IOException {
-// HttpURLConnection con = (HttpURLConnection) url.openConnection();
-// try {
-// if ("PATCH".equals(method)) {
-// con.setRequestMethod("POST");
-// con.setRequestProperty("X-HTTP-Method-Override", "PATCH");
-// } else {
-// con.setRequestMethod(method);
-// }
-// con.setRequestProperty(MetaStoreUtils.USER_NAME_HTTP_HEADER, url.getUserInfo());
-// if (json) {
-// con.setRequestProperty("Content-Type", "application/json");
-// } else {
-// con.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
-// }
-// con.setRequestProperty("Accept", "application/json");
-// if (jwt != null) {
-// con.setRequestProperty("Authorization", "Bearer " + jwt);
-// }
-// con.setDoInput(true);
-// if (arg != null) {
-// con.setDoOutput(true);
-// try (DataOutputStream wr = new DataOutputStream(con.getOutputStream())) {
-// if (json) {
-// wr.writeBytes(serialize(arg));
-// } else {
-// wr.writeBytes(arg.toString());
-// }
-// wr.flush();
-// }
-// }
-// // perform http method
-// return httpResponse(con);
-// } finally {
-// con.disconnect();
-// }
-// }
-//
-// private static Object httpResponse(HttpURLConnection con) throws IOException {
-// int responseCode = con.getResponseCode();
-// InputStream responseStream = con.getErrorStream();
-// if (responseStream == null) {
-// try {
-// responseStream = con.getInputStream();
-// } catch (IOException e) {
-// return new ServerResponse(responseCode, e.getMessage());
-// }
-// }
-// if (responseStream != null) {
-// try (BufferedReader reader = new BufferedReader(
-// new InputStreamReader(responseStream, StandardCharsets.UTF_8))) {
-// // if not strictly ok, check we are still receiving a JSON
-// if (responseCode != HttpServletResponse.SC_OK) {
-// String contentType = con.getContentType();
-// if (contentType == null || !contentType.contains("application/json")) {
-// String line;
-// StringBuilder response = new StringBuilder("error " + responseCode + ":");
-// while ((line = reader.readLine()) != null) response.append(line);
-// return new ServerResponse(responseCode, response.toString());
-// }
-// }
-// // there might be no answer which is still ok
-// Object r = reader.ready() ? deserialize(reader) : new HashMap<>(1);
-// if (r instanceof Map) {
-// ((Map) r).put("status", responseCode);
-// }
-// return r;
-// }
-// }
-// return responseCode;
-//}
-//
-// private static final ObjectMapper MAPPER = RESTObjectMapper.mapper();
-//
-// static String serialize(T object) {
-// try {
-// return MAPPER.writeValueAsString(object);
-// } catch (JsonProcessingException xany) {
-// throw new RuntimeException(xany);
-// }
-// }
-//
-// static T deserialize(Reader s) {
-// try {
-// return MAPPER.readValue(s, new TypeReference() {});
-// } catch (IOException xany) {
-// throw new RuntimeException(xany);
-// }
-// }
-//
-// static Object eval(Object properties, String expr) {
-// try {
-// JexlContext context = properties instanceof Map
-// ? new MapContext((Map) properties)
-// : JexlEngine.EMPTY_CONTEXT;
-// Object result = JEXL.createScript(expr).execute(context, properties);
-// return result;
-// } catch (JexlException xany) {
-// throw xany;
-// }
-// }
-//}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCatalog.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCatalog.java
deleted file mode 100644
index bfe9d1f3a416..000000000000
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCatalog.java
+++ /dev/null
@@ -1,265 +0,0 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one
-// * or more contributor license agreements. See the NOTICE file
-// * distributed with this work for additional information
-// * regarding copyright ownership. The ASF licenses this file
-// * to you under the Apache License, Version 2.0 (the
-// * "License"); you may not use this file except in compliance
-// * with the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing,
-// * software distributed under the License is distributed on an
-// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// * KIND, either express or implied. See the License for the
-// * specific language governing permissions and limitations
-// * under the License.
-// */
-//
-//package org.apache.iceberg.rest;
-//
-//import java.io.IOException;
-//import java.net.URI;
-//import java.net.URL;
-//import java.util.Collections;
-//import java.util.List;
-//import java.util.Map;
-//
-//import org.apache.hadoop.conf.Configuration;
-//import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-//import org.apache.hadoop.hive.metastore.api.Database;
-//import org.apache.iceberg.CatalogUtil;
-//import org.apache.iceberg.Schema;
-//import org.apache.iceberg.Table;
-//import org.apache.iceberg.Transaction;
-//import org.apache.iceberg.catalog.Namespace;
-//import org.apache.iceberg.catalog.TableIdentifier;
-//import org.apache.iceberg.exceptions.NoSuchTableException;
-//import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-//import org.apache.iceberg.rest.requests.CreateTableRequest;
-//import org.apache.iceberg.rest.requests.RenameTableRequest;
-//import org.apache.iceberg.types.Types;
-//import static org.apache.iceberg.types.Types.NestedField.required;
-//import org.junit.After;
-//import org.junit.Assert;
-//import org.junit.Before;
-//import org.junit.Test;
-//
-//public class TestHMSCatalog extends HMSTestBase {
-// public TestHMSCatalog() {
-// super();
-// }
-//
-// @Before
-// @Override
-// public void setUp() throws Exception {
-// super.setUp();
-// }
-//
-// @After
-// @Override
-// public void tearDown() throws Exception {
-// super.tearDown();
-// }
-//
-// @Test
-// public void testCreateNamespaceHttp() throws Exception {
-// String ns = "nstesthttp";
-// // list namespaces
-// URL url = new URL("http://hive@localhost:" + catalogPort + "/"+catalogPath+"/v1/namespaces");
-// String jwt = generateJWT();
-// // check namespaces list (ie 0)
-// Object response = clientCall(jwt, url, "GET", null);
-// Assert.assertTrue(response instanceof Map);
-// Map nsrep = (Map) response;
-// List> nslist = (List>) nsrep.get("namespaces");
-// Assert.assertEquals(2, nslist.size());
-// Assert.assertTrue((nslist.contains(Collections.singletonList("default"))));
-// Assert.assertTrue((nslist.contains(Collections.singletonList("hivedb"))));
-// // succeed
-// response = clientCall(jwt, url, "POST", false, "{ \"namespace\" : [ \""+ns+"\" ], "+
-// "\"properties\":{ \"owner\": \"apache\", \"group\" : \"iceberg\" }"
-// +"}");
-// Assert.assertNotNull(response);
-// HiveMetaStoreClient client = createClient(conf);
-// Database database1 = client.getDatabase(ns);
-// Assert.assertEquals("apache", database1.getParameters().get("owner"));
-// Assert.assertEquals("iceberg", database1.getParameters().get("group"));
-//
-// Assert.assertSame(HMSCachingCatalog.class, catalog.getClass());
-// List tis = catalog.listTables(Namespace.of(ns));
-// Assert.assertTrue(tis.isEmpty());
-//
-// // list tables in hivedb
-// url = new URL("http://hive@localhost:" + catalogPort + "/" + catalogPath+"/v1/namespaces/" + ns + "/tables");
-// // succeed
-// response = clientCall(jwt, url, "GET", null);
-// Assert.assertNotNull(response);
-//
-// // quick check on metrics
-// Map counters = reportMetricCounters("list_namespaces", "list_tables");
-// counters.forEach((key, value) -> Assert.assertTrue(key, value > 0));
-// }
-//
-// private Schema getTestSchema() {
-// return new Schema(
-// required(1, "id", Types.IntegerType.get(), "unique ID"),
-// required(2, "data", Types.StringType.get()));
-// }
-//
-// @Test
-// public void testCreateTableTxnBuilder() throws Exception {
-// URI iceUri = URI.create("http://hive@localhost:" + catalogPort + "/"+catalogPath+"/v1/");
-// String jwt = generateJWT();
-// Schema schema = getTestSchema();
-// final String tblName = "tbl_" + Integer.toHexString(RND.nextInt(65536));
-// final TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, tblName);
-// String location = temp.newFolder(tableIdent.toString()).toString();
-//
-// try {
-// Transaction txn = catalog.buildTable(tableIdent, schema)
-// .withLocation(location)
-// .createTransaction();
-// txn.commitTransaction();
-// Table table = catalog.loadTable(tableIdent);
-//
-// Assert.assertEquals(location, table.location());
-// Assert.assertEquals(2, table.schema().columns().size());
-// Assert.assertTrue(table.spec().isUnpartitioned());
-// List tis = catalog.listTables(Namespace.of(DB_NAME));
-// Assert.assertFalse(tis.isEmpty());
-//
-// // list namespaces
-// URL url = iceUri.resolve("namespaces").toURL();
-// // succeed
-// Object response = clientCall(jwt, url, "GET", null);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// List> nslist = (List>) eval(response, "json -> json.namespaces");
-// Assert.assertEquals(2, nslist.size());
-// Assert.assertTrue((nslist.contains(Collections.singletonList("default"))));
-// Assert.assertTrue((nslist.contains(Collections.singletonList("hivedb"))));
-//
-// // list tables in hivedb
-// url = iceUri.resolve("namespaces/" + DB_NAME + "/tables").toURL();
-// // succeed
-// response = clientCall(jwt, url, "GET", null);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// Assert.assertEquals(1, (int) eval(response, "json -> size(json.identifiers)"));
-// Assert.assertEquals(tblName, eval(response, "json -> json.identifiers[0].name"));
-//
-// // load table
-// url = iceUri.resolve("namespaces/" + DB_NAME + "/tables/" + tblName).toURL();
-// // succeed
-// response = clientCall(jwt, url, "GET", null);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// Assert.assertEquals(location, eval(response, "json -> json.metadata.location"));
-//
-// // quick check on metrics
-// Map counters = reportMetricCounters("list_namespaces", "list_tables", "load_table");
-// counters.forEach((key, value) -> Assert.assertTrue(key, value > 0));
-// table = catalog.loadTable(tableIdent);
-// Assert.assertNotNull(table);
-// } catch (IOException xany) {
-// Assert.fail(xany.getMessage());
-// } finally {
-// catalog.dropTable(tableIdent, false);
-// }
-// }
-//
-// @Test
-// public void testTableAPI() throws Exception {
-// Assert.assertSame(HMSCachingCatalog.class, catalog.getClass());
-// URI iceUri = URI.create("http://hive@localhost:" + catalogPort + "/"+catalogPath+"/v1/");
-// String jwt = generateJWT();
-// Schema schema = getTestSchema();
-// final String tblName = "tbl_" + Integer.toHexString(RND.nextInt(65536));
-// final TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, tblName);
-// String location = temp.newFolder(tableIdent.toString()).toString();
-// // create table
-// CreateTableRequest create = CreateTableRequest.builder().
-// withName(tblName).
-// withLocation(location).
-// withSchema(schema).build();
-// URL url = iceUri.resolve("namespaces/" + DB_NAME + "/tables").toURL();
-// Object response = clientCall(jwt, url, "POST", create);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// Assert.assertEquals(location, eval(response, "json -> json.metadata.location"));
-// Table table = catalog.loadTable(tableIdent);
-// Assert.assertEquals(location, table.location());
-//
-// // rename table
-// final String rtblName = "TBL_" + Integer.toHexString(RND.nextInt(65536));
-// final TableIdentifier rtableIdent = TableIdentifier.of(DB_NAME, rtblName);
-// RenameTableRequest rename = RenameTableRequest.builder().
-// withSource(tableIdent).
-// withDestination(rtableIdent).
-// build();
-// url = iceUri.resolve("tables/rename").toURL();
-// response = clientCall(jwt, url, "POST", rename);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// table = catalog.loadTable(rtableIdent);
-// Assert.assertEquals(location, table.location());
-//
-// // delete table
-// url = iceUri.resolve("namespaces/" + DB_NAME + "/tables/" + rtblName).toURL();
-// response = clientCall(jwt, url, "DELETE", null);
-// Assert.assertNotNull(response);
-// Assert.assertEquals(200, (int) eval(response, "json -> json.status"));
-// Assert.assertThrows(NoSuchTableException.class, () -> catalog.loadTable(rtableIdent));
-// }
-//
-// public static final String CATALOG_CONFIG_PREFIX = "iceberg.catalog.";
-// public static final String CATALOG_NAME = "iceberg.catalog";
-//
-// private static Map getCatalogPropertiesFromConf(Configuration conf, String catalogName) {
-// Map catalogProperties = Maps.newHashMap();
-// String keyPrefix = CATALOG_CONFIG_PREFIX + catalogName;
-// conf.forEach(config -> {
-// if (config.getKey().startsWith(keyPrefix)) {
-// catalogProperties.put(
-// config.getKey().substring(keyPrefix.length() + 1),
-// config.getValue());
-// }
-// });
-// return catalogProperties;
-// }
-//
-// @Test
-// public void testBuildTable() throws Exception {
-// String cname = catalog.name();
-// URI iceUri = URI.create("http://localhost:" + catalogPort + "/"+catalogPath);
-// String jwt = generateJWT();
-// Schema schema = getTestSchema();
-// final String tblName = "tbl_" + Integer.toHexString(RND.nextInt(65536));
-// final TableIdentifier TBL = TableIdentifier.of(DB_NAME, tblName);
-// String location = temp.newFolder(TBL.toString()).toString();
-//
-// Configuration configuration = new Configuration();
-// configuration.set("iceberg.catalog", cname);
-// configuration.set("iceberg.catalog."+cname+".type", "rest");
-// configuration.set("iceberg.catalog."+cname+".uri", iceUri.toString());
-// configuration.set("iceberg.catalog."+cname+".token", jwt);
-//
-// String catalogName = configuration.get(CATALOG_NAME);
-// Assert.assertEquals(cname, catalogName);
-// Map properties = getCatalogPropertiesFromConf(configuration, catalogName);
-// Assert.assertFalse(properties.isEmpty());
-// RESTCatalog restCatalog = (RESTCatalog) CatalogUtil.buildIcebergCatalog(catalogName, properties, configuration);
-// restCatalog.initialize(catalogName, properties);
-//
-// restCatalog.buildTable(TBL, schema)
-// .withLocation(location)
-// .createTransaction()
-// .commitTransaction();
-// Table table = catalog.loadTable(TBL);
-// Assert.assertEquals(location, table.location());
-// Table restTable = restCatalog.loadTable(TBL);
-// Assert.assertEquals(location, restTable.location());
-// }
-//}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
index 25a1df0f5e45..466cf4603bb2 100644
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
@@ -39,7 +39,6 @@ public class RESTCatalogServer {
private static int createMetastoreServerWithRESTCatalog(int restPort, Configuration conf) throws Exception {
MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT, restPort);
- MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY, 1000L);
return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf,
true, false, false, false);
}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 67e009aedc9b..700d4f662230 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -684,7 +684,6 @@ private static void constraintHttpMethods(ServletContextHandler ctxHandler, bool
*/
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge,
Configuration conf, boolean startMetaStoreThreads, AtomicBoolean startedBackgroundThreads) throws Throwable {
- // If we start an Iceberg REST catalog, we need to listen to events to maintain its consistency.
isMetaStoreRemote = true;
String transportMode = MetastoreConf.getVar(conf, ConfVars.THRIFT_TRANSPORT_MODE, "binary");
boolean isHttpTransport = transportMode.equalsIgnoreCase("http");
From 4e9a52f34447dbd9b60cdb43a00fe69e597cc8a2 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Mon, 4 Aug 2025 14:36:55 +0200
Subject: [PATCH 09/27] - Improved loadTable cache handling
- Reduced redundant calls (avoid super call);
- Call invalidateTable for thorough eviction;
---
.../iceberg/rest/HMSCachingCatalog.java | 46 +++++++++++++++----
1 file changed, 37 insertions(+), 9 deletions(-)
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
index 14074da49df4..53d9d60faf45 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
@@ -19,13 +19,20 @@
package org.apache.iceberg.rest;
+import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Ticker;
import java.util.List;
import java.util.Map;
import java.util.Set;
+
+import org.apache.iceberg.BaseMetadataTable;
import org.apache.iceberg.CachingCatalog;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
+import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
@@ -64,23 +71,44 @@ public List listNamespaces(Namespace nmspc) throws NoSuchNamespaceExc
@Override
public Table loadTable(TableIdentifier identifier) {
- TableIdentifier canonicalIdentifier = identifier.toLowerCase();
- Table cachedTable = tableCache.getIfPresent(canonicalIdentifier);
+ final Cache cache = this.tableCache;
+ final HiveCatalog catalog = this.hiveCatalog;
+ final TableIdentifier canonicalized = identifier.toLowerCase();
+ Table cachedTable = cache.getIfPresent(canonicalized);
if (cachedTable != null) {
- String location = hiveCatalog.getTableLocation(canonicalIdentifier);
+ String location = catalog.getTableLocation(canonicalized);
if (location == null) {
- LOG.debug("Table {} has no location, returning cached table without location", canonicalIdentifier);
+ LOG.debug("Table {} has no location, returning cached table without location", canonicalized);
} else if (!location.equals(cachedTable.location())) {
LOG.debug("Cached table {} has a different location than the one in the catalog: {} != {}",
- canonicalIdentifier, cachedTable.location(), location);
+ canonicalized, cachedTable.location(), location);
+ // Invalidate the cached table if the location is different
+ invalidateTable(canonicalized);
} else {
- LOG.debug("Returning cached table: {}", canonicalIdentifier);
+ LOG.debug("Returning cached table: {}", canonicalized);
return cachedTable;
}
- // Invalidate the cached table if the location is different
- tableCache.invalidate(cachedTable);
}
- return super.loadTable(identifier);
+ Table table = cache.get(canonicalized, catalog::loadTable);
+ if (table instanceof BaseMetadataTable) {
+ // Cache underlying table
+ TableIdentifier originTableIdentifier =
+ TableIdentifier.of(canonicalized.namespace().levels());
+ Table originTable = cache.get(originTableIdentifier, catalog::loadTable);
+ // Share TableOperations instance of origin table for all metadata tables, so that metadata
+ // table instances are refreshed as well when origin table instance is refreshed.
+ if (originTable instanceof HasTableOperations) {
+ TableOperations ops = ((HasTableOperations) originTable).operations();
+ MetadataTableType type = MetadataTableType.from(canonicalized.name());
+
+ Table metadataTable =
+ MetadataTableUtils.createMetadataTableInstance(
+ ops, catalog.name(), originTableIdentifier, canonicalized, type);
+ cache.put(canonicalized, metadataTable);
+ return metadataTable;
+ }
+ }
+ return table;
}
@Override
From 8c7de034e5feca4967213b3c3ee026c0c0321cc4 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Wed, 6 Aug 2025 10:11:25 +0200
Subject: [PATCH 10/27] Update
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
Co-authored-by: Shohei Okumiya
---
.../main/java/org/apache/iceberg/rest/HMSCatalogFactory.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
index 508d17e533ed..f1f2a0e87aac 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
@@ -37,7 +37,7 @@
* can serve as a base for specialization.
*/
public class HMSCatalogFactory {
- private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
+ private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
protected final Configuration configuration;
protected final int port;
From 235771d11755db71edc980b8a8bc504150d244c8 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Thu, 7 Aug 2025 10:27:34 +0200
Subject: [PATCH 11/27] HIVE-29035: removed extensibility from
HMSCatalogFactory;
---
.../iceberg/rest/HMSCatalogFactory.java | 24 +++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
index f1f2a0e87aac..d30fee989def 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
@@ -36,12 +36,12 @@
* This class is derivable on purpose; the factory class name is a configuration property, this class
* can serve as a base for specialization.
*/
-public class HMSCatalogFactory {
+public final class HMSCatalogFactory {
private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
- protected final Configuration configuration;
- protected final int port;
- protected final String path;
+ private final Configuration configuration;
+ private final int port;
+ private final String path;
/**
* Factory constructor.
@@ -49,17 +49,17 @@ public class HMSCatalogFactory {
* declared in configuration and found through introspection.
* @param conf the configuration
*/
- protected HMSCatalogFactory(Configuration conf) {
+ private HMSCatalogFactory(Configuration conf) {
port = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT);
path = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_SERVLET_PATH);
this.configuration = conf;
}
-
- public int getPort() {
+
+ private int getPort() {
return port;
}
-
- public String getPath() {
+
+ private String getPath() {
return path;
}
@@ -67,7 +67,7 @@ public String getPath() {
* Creates the catalog instance.
* @return the catalog
*/
- protected Catalog createCatalog() {
+ private Catalog createCatalog() {
final Map properties = new TreeMap<>();
MetastoreConf.setVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS, "");
final String configUri = MetastoreConf.getVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS);
@@ -100,7 +100,7 @@ protected Catalog createCatalog() {
* @param catalog the Iceberg catalog
* @return the servlet
*/
- protected HttpServlet createServlet(Catalog catalog) {
+ private HttpServlet createServlet(Catalog catalog) {
String authType = MetastoreConf.getVar(configuration, ConfVars.CATALOG_SERVLET_AUTH);
ServletSecurity security = new ServletSecurity(AuthType.fromString(authType), configuration);
return security.proxy(new HMSCatalogServlet(new HMSCatalogAdapter(catalog)));
@@ -110,7 +110,7 @@ protected HttpServlet createServlet(Catalog catalog) {
* Creates the REST catalog servlet instance.
* @return the servlet
*/
- protected HttpServlet createServlet() {
+ private HttpServlet createServlet() {
if (port >= 0 && path != null && !path.isEmpty()) {
Catalog actualCatalog = createCatalog();
return createServlet(actualCatalog);
From 210a2ccabc2105b6cd229abbd3b87d68c76113ad Mon Sep 17 00:00:00 2001
From: Henrib
Date: Tue, 12 Aug 2025 18:23:33 +0200
Subject: [PATCH 12/27] HIVE-29035: fixing table metadata location check
mistake; - made factory & cache extensible; - added specific test to verify
cache behavior;
---
.../org/apache/iceberg/hive/HiveCatalog.java | 4 +-
.../iceberg/rest/HMSCachingCatalog.java | 76 ++++--
.../iceberg/rest/HMSCatalogFactory.java | 33 ++-
.../iceberg/rest/TestServerCatalogCache.java | 217 ++++++++++++++++++
.../rest/extension/HMSTestCachingCatalog.java | 84 +++++++
.../rest/extension/HMSTestCatalogFactory.java | 80 +++++++
.../HiveRESTCatalogServerExtension.java | 9 +-
7 files changed, 473 insertions(+), 30 deletions(-)
create mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
create mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
create mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCatalogFactory.java
diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
index ce30a647c74e..f548ebc99aea 100644
--- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
+++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
@@ -414,7 +414,7 @@ public boolean tableExists(TableIdentifier identifier) {
}
/**
- * Check whether table or metadata table exists and return its location.
+ * Check whether table exists and return its current metadata location.
*
* Note: If a hive table with the same identifier exists in catalog, this method will return
* {@code null}.
@@ -422,7 +422,7 @@ public boolean tableExists(TableIdentifier identifier) {
* @param identifier a table identifier
* @return the location of the table if it exists, null otherwise
*/
- public String getTableLocation(TableIdentifier identifier) {
+ public String getTableMetadataLocation(TableIdentifier identifier) {
Table table = fetchTable(identifier);
if (table == null) {
return null;
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
index 53d9d60faf45..9656469c1edf 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
@@ -24,7 +24,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-
import org.apache.iceberg.BaseMetadataTable;
import org.apache.iceberg.CachingCatalog;
import org.apache.iceberg.HasTableOperations;
@@ -32,6 +31,7 @@
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
+import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
@@ -52,7 +52,7 @@
*/
public class HMSCachingCatalog extends CachingCatalog implements SupportsNamespaces, ViewCatalog {
private static final Logger LOG = LoggerFactory.getLogger(HMSCachingCatalog.class);
- private final HiveCatalog hiveCatalog;
+ protected final HiveCatalog hiveCatalog;
public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
super(catalog, false, expiration, Ticker.systemTicker());
@@ -69,25 +69,69 @@ public List listNamespaces(Namespace nmspc) throws NoSuchNamespaceExc
return hiveCatalog.listNamespaces(nmspc);
}
+ protected void cacheInvalidateInc(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
+ }
+
+ protected void cacheLoadInc(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
+ }
+
+ protected void cacheHitInc(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
+ }
+
+ protected void cacheMissInc(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
+ }
+
+ protected void cacheMetaLoadInc(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
+ }
+
+ /**
+ * Gets the metadata file location of a table.
+ *
+ * @param table the table
+ * @return the location of the metadata file, or null if the table does not have a location
+ */
+ protected static String getMetadataLocation(final Table table) {
+ if (table instanceof HasTableOperations tableOps) {
+ final TableOperations ops = tableOps.operations();
+ final TableMetadata meta;
+ if (ops != null && (meta = ops.current()) != null) {
+ return meta.metadataFileLocation();
+ }
+ }
+ return null;
+ }
+
@Override
- public Table loadTable(TableIdentifier identifier) {
+ public Table loadTable(final TableIdentifier identifier) {
final Cache cache = this.tableCache;
final HiveCatalog catalog = this.hiveCatalog;
final TableIdentifier canonicalized = identifier.toLowerCase();
Table cachedTable = cache.getIfPresent(canonicalized);
if (cachedTable != null) {
- String location = catalog.getTableLocation(canonicalized);
+ final String location = catalog.getTableMetadataLocation(canonicalized);
if (location == null) {
LOG.debug("Table {} has no location, returning cached table without location", canonicalized);
- } else if (!location.equals(cachedTable.location())) {
- LOG.debug("Cached table {} has a different location than the one in the catalog: {} != {}",
- canonicalized, cachedTable.location(), location);
- // Invalidate the cached table if the location is different
- invalidateTable(canonicalized);
} else {
- LOG.debug("Returning cached table: {}", canonicalized);
- return cachedTable;
+ String cachedLocation = getMetadataLocation(cachedTable);
+ if (!location.equals(cachedLocation)) {
+ LOG.debug("Invalidate table {}, cached location {} != actual location {}", canonicalized, cachedLocation, location);
+ // Invalidate the cached table if the location is different
+ invalidateTable(canonicalized);
+ cacheInvalidateInc(canonicalized);
+ } else {
+ LOG.debug("Returning cached table: {}", canonicalized);
+ cacheHitInc(canonicalized);
+ return cachedTable;
+ }
}
+ } else {
+ LOG.debug("Cache miss for table: {}", canonicalized);
+ cacheMissInc(canonicalized);
}
Table table = cache.get(canonicalized, catalog::loadTable);
if (table instanceof BaseMetadataTable) {
@@ -97,17 +141,21 @@ public Table loadTable(TableIdentifier identifier) {
Table originTable = cache.get(originTableIdentifier, catalog::loadTable);
// Share TableOperations instance of origin table for all metadata tables, so that metadata
// table instances are refreshed as well when origin table instance is refreshed.
- if (originTable instanceof HasTableOperations) {
- TableOperations ops = ((HasTableOperations) originTable).operations();
+ if (originTable instanceof HasTableOperations originTableOps) {
+ TableOperations ops = originTableOps.operations();
MetadataTableType type = MetadataTableType.from(canonicalized.name());
-
Table metadataTable =
MetadataTableUtils.createMetadataTableInstance(
ops, catalog.name(), originTableIdentifier, canonicalized, type);
cache.put(canonicalized, metadataTable);
+ cacheMetaLoadInc(canonicalized);
+ LOG.debug("Loaded metadata table: {} for origin table: {}", canonicalized, originTableIdentifier);
+ // Return the metadata table instead of the original table
return metadataTable;
}
}
+ cacheLoadInc(canonicalized);
+ LOG.debug("Loaded table: {} ", canonicalized);
return table;
}
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
index d30fee989def..952b9165b141 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
@@ -36,12 +36,12 @@
* This class is derivable on purpose; the factory class name is a configuration property, this class
* can serve as a base for specialization.
*/
-public final class HMSCatalogFactory {
+public class HMSCatalogFactory {
private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
- private final Configuration configuration;
- private final int port;
- private final String path;
+ protected final Configuration configuration;
+ protected final int port;
+ protected final String path;
/**
* Factory constructor.
@@ -49,17 +49,17 @@ public final class HMSCatalogFactory {
* declared in configuration and found through introspection.
* @param conf the configuration
*/
- private HMSCatalogFactory(Configuration conf) {
+ protected HMSCatalogFactory(Configuration conf) {
port = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT);
path = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_SERVLET_PATH);
this.configuration = conf;
}
- private int getPort() {
+ protected final int getPort() {
return port;
}
- private String getPath() {
+ protected final String getPath() {
return path;
}
@@ -67,7 +67,7 @@ private String getPath() {
* Creates the catalog instance.
* @return the catalog
*/
- private Catalog createCatalog() {
+ protected Catalog createCatalog() {
    final Map<String, String> properties = new TreeMap<>();
MetastoreConf.setVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS, "");
final String configUri = MetastoreConf.getVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS);
@@ -91,8 +91,19 @@ private Catalog createCatalog() {
hiveCatalog.setConf(configuration);
final String catalogName = MetastoreConf.getVar(configuration, MetastoreConf.ConfVars.CATALOG_DEFAULT);
hiveCatalog.initialize(catalogName, properties);
+ return cacheCatalog(hiveCatalog);
+ }
+
+ /**
+ * Wraps the catalog in a caching catalog.
+ * By default, the catalog is wrapped in {@link HMSCachingCatalog} that caches tables.
+ * @param hiveCatalog the Iceberg catalog
+ * @return the caching catalog
+ */
+ protected Catalog cacheCatalog(HiveCatalog hiveCatalog) {
+ // If the catalog is not a caching catalog, wrap it in one
long expiry = MetastoreConf.getLongVar(configuration, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY);
- return expiry > 0 ? new HMSCachingCatalog(hiveCatalog, expiry) : hiveCatalog;
+ return new HMSCachingCatalog(hiveCatalog, expiry);
}
/**
@@ -100,7 +111,7 @@ private Catalog createCatalog() {
* @param catalog the Iceberg catalog
* @return the servlet
*/
- private HttpServlet createServlet(Catalog catalog) {
+ protected HttpServlet createServlet(Catalog catalog) {
String authType = MetastoreConf.getVar(configuration, ConfVars.CATALOG_SERVLET_AUTH);
ServletSecurity security = new ServletSecurity(AuthType.fromString(authType), configuration);
return security.proxy(new HMSCatalogServlet(new HMSCatalogAdapter(catalog)));
@@ -110,7 +121,7 @@ private HttpServlet createServlet(Catalog catalog) {
* Creates the REST catalog servlet instance.
* @return the servlet
*/
- private HttpServlet createServlet() {
+ protected HttpServlet createServlet() {
if (port >= 0 && path != null && !path.isEmpty()) {
Catalog actualCatalog = createCatalog();
return createServlet(actualCatalog);
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
new file mode 100644
index 000000000000..df7b2a10ca0b
--- /dev/null
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.Files;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.data.parquet.GenericParquetWriter;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.io.FileAppender;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.parquet.Parquet;
+import org.apache.iceberg.rest.extension.HMSTestCachingCatalog;
+import org.apache.iceberg.rest.extension.HMSTestCatalogFactory;
+import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension;
+import org.apache.iceberg.types.Types;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+
+import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * The test verifies the behavior of the HMSCachingCatalog.
+ *
+ * The cache relies on the table metadata location to determine whether a table has changed between a cached entry
+ * and the latest version in the HMS database.
+ * The test will create a table, load it, insert some data, and check that the cache correctly invalidates
+ * cached entries when the table is modified.
+ *
+ */
+class TestServerCatalogCache {
+ private static final String NEWDB = "newdb";
+ private static final String TBL = "tbl";
+ private static final String TEMPDIR = "file:/tmp/junit" + Long.toHexString(System.currentTimeMillis()) + "/";
+ private static final String ID = "id";
+ private static final String DATA = "data";
+ private static final Namespace NS = Namespace.of(new String[]{NEWDB});
+ private static RESTCatalog restCatalog;
+ private static HiveRESTCatalogServerExtension restServer =
+ HiveRESTCatalogServerExtension.builder(AuthType.NONE).build(testConfiguration());
+
+ private static Configuration testConfiguration() {
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_FACTORY, HMSTestCatalogFactory.class.getName());
+ return conf;
+ }
+
+ private static String baseTableLocation(TableIdentifier identifier) {
+ Namespace ns = identifier.namespace();
+ return TEMPDIR + ns + "/" + identifier.name();
+ }
+
+ @BeforeAll
+ static void setupAll() throws Exception {
+ restServer.beforeAll(null);
+ HMSTestCachingCatalog cachingCatalog = HMSTestCatalogFactory.getLastCatalog();
+ Assertions.assertNotNull(cachingCatalog);
+
+ Map<String, String> catalogProperties = new HashMap<>();
+ catalogProperties.putIfAbsent("uri", restServer.getRestEndpoint());
+ catalogProperties.putIfAbsent("warehouse", "rck_warehouse");
+ restCatalog = new RESTCatalog();
+ restCatalog.setConf(new Configuration());
+ restCatalog.initialize("hive", catalogProperties);
+ }
+
+ @BeforeEach
+ void setup() {
+ try {
+ restServer.beforeEach(null);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ RCKUtils.purgeCatalogTestEntries(restCatalog);
+ }
+
+ @AfterAll
+ static void teardownAll() throws Exception {
+ if (restCatalog != null) {
+ restCatalog.close();
+ }
+ restServer.afterAll(null);
+ restServer = null;
+ }
+
+ @Test
+ void testTableCache() {
+ restCatalog.createNamespace(NS);
+ // acquire the server cache
+ HMSTestCachingCatalog cachingCatalog = HMSTestCatalogFactory.getLastCatalog();
+ Assertions.assertNotNull(cachingCatalog);
+ HiveCatalog catalog = cachingCatalog.getHiveCatalog();
+ // create a table schema
+ Schema schema = new Schema(
+ required(1, ID, Types.IntegerType.get()),
+ required(2, DATA, Types.StringType.get()));
+ TableIdentifier tableIdent = TableIdentifier.of(NEWDB, TBL);
+ String location = baseTableLocation(tableIdent);
+ try {
+ // create a table in the catalog bypassing the cache
+ // using restCatalog would defeat the purpose since it would use the cache
+ Table table = catalog
+ .buildTable(tableIdent, schema)
+ .withLocation(location)
+ .create();
+ assertThat(table.location()).isEqualTo(location);
+ assertThat(table.schema().columns()).hasSize(2);
+ // check that the table is *not* yet in the cache (hit 0, miss 1, load 1)
+ Table l0 = cachingCatalog.loadTable(tableIdent);
+ assertThat(table.location()).isEqualTo(location);
+ assertThat(l0).isNotEqualTo(table);
+ Map<String, Integer> m0 = cachingCatalog.getCacheMetrics();
+ assertThat(m0).containsEntry("hit", 0);
+ assertThat(m0).containsEntry("miss", 1);
+ assertThat(m0).containsEntry("invalidation", 0);
+ assertThat(m0).containsEntry("load", 1);
+ // load the table multiple times, find it in the cache (hit 10)
+ for (int i = 0; i < 10; i++) {
+ Table l = cachingCatalog.loadTable(tableIdent);
+ assertEquals(l0, l);
+ }
+ // load the table multiple times through the REST catalog, find it in the cache (hit 10)
+ for (int i = 0; i < 10; i++) {
+ Table l = restCatalog.loadTable(tableIdent);
+ // not the same instance, but the same metadata location
+ assertEquals(metadataLocation(l0), metadataLocation(l));
+ }
+ m0 = cachingCatalog.getCacheMetrics();
+ assertThat(m0).containsEntry("hit", 20);
+ // add rows through table; new snapshot implies invalidation
+ insertRows(table);
+ // load again, should provoke invalidation (invalidation 1, miss 1, load 2)
+ Table l1 = cachingCatalog.loadTable(tableIdent);
+ assertThat(l1).isNotEqualTo(l0);
+ Map<String, Integer> m2 = cachingCatalog.getCacheMetrics();
+ assertThat(m2).containsEntry("invalidation", 1);
+ assertThat(m2).containsEntry("miss", 1);
+ assertThat(m2).containsEntry("load", 2);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ catalog.dropTable(tableIdent);
+ HMSTestCatalogFactory.clearLastCatalog();
+ }
+ }
+
+ private static String metadataLocation(Table table) {
+ return HMSCachingCatalog.getMetadataLocation(table);
+ }
+
+ private void insertRows(Table table) throws IOException {
+ org.apache.iceberg.data.Record genericRecord = GenericRecord.create(table.schema());
+ // write the records to a Parquet file
+ final int records = 8;
+ URI tempDirUri = URI.create(TEMPDIR);
+ File file = Paths.get(tempDirUri).resolve("test_cache.parquet").toFile();
+ OutputFile outputFile = Files.localOutput(file);
+ try (FileAppender<Record> appender = Parquet.write(outputFile)
+ .schema(table.schema())
+ .createWriterFunc(GenericParquetWriter::buildWriter)
+ .build()) {
+ for (int i= 0; i < records; ++i) {
+ genericRecord.setField(ID, i);
+ genericRecord.setField(DATA, Integer.toBinaryString(i));
+ appender.add(genericRecord);
+ }
+ }
+ // create a DataFile from the Parquet file
+ DataFile dataFile = DataFiles.builder(table.spec())
+ .withPath(file.getAbsolutePath())
+ .withFileSizeInBytes(file.length())
+ .withRecordCount(records)
+ .build();
+ // append the DataFile to the table
+ table.newAppend()
+ .appendFile(dataFile)
+ .commit();
+ }
+}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
new file mode 100644
index 000000000000..26c2ffc9446d
--- /dev/null
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest.extension;
+
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.rest.HMSCachingCatalog;
+
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A test class for HMSCachingCatalog that tracks cache metrics for test verifications.
+ * It extends HMSCachingCatalog and overrides methods to increment counters for cache hits,
+ * misses, invalidations, and loads.
+ */
+public class HMSTestCachingCatalog extends HMSCachingCatalog {
+ private final HiveCatalog hiveCatalog;
+ protected final AtomicInteger cacheHitCount = new AtomicInteger(0);
+ protected final AtomicInteger cacheMissCount = new AtomicInteger(0);
+ protected final AtomicInteger cacheInvalidationCount = new AtomicInteger(0);
+ protected final AtomicInteger cacheLoadCount = new AtomicInteger(0);
+
+ /**
+ * Constructor for HMSTestCachingCatalog.
+ *
+ * @param catalog the HiveCatalog to wrap
+ * @param expiration the cache expiration time in milliseconds
+ */
+ public HMSTestCachingCatalog(HiveCatalog catalog, long expiration) {
+ super(catalog, expiration);
+ this.hiveCatalog = catalog;
+ }
+
+ public Map<String, Integer> getCacheMetrics() {
+ return Map.of(
+ "hit", cacheHitCount.get(),
+ "miss", cacheMissCount.get(),
+ "invalidation", cacheInvalidationCount.get(),
+ "load", cacheLoadCount.get()
+ );
+ }
+
+ public HiveCatalog getHiveCatalog() {
+ return hiveCatalog;
+ }
+
+ @Override
+ protected void cacheInvalidateInc(TableIdentifier tid) {
+ cacheInvalidationCount.incrementAndGet();
+ }
+
+ @Override
+ protected void cacheLoadInc(TableIdentifier tid) {
+ cacheLoadCount.incrementAndGet();
+ }
+
+ @Override
+ protected void cacheHitInc(TableIdentifier tid) {
+ cacheHitCount.incrementAndGet();
+ }
+
+ @Override
+ protected void cacheMissInc(TableIdentifier tid) {
+ cacheMissCount.incrementAndGet();
+ }
+}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCatalogFactory.java
new file mode 100644
index 000000000000..611a5d1e512d
--- /dev/null
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCatalogFactory.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest.extension;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.ServletServerBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.rest.HMSCatalogFactory;
+
+import javax.servlet.http.HttpServlet;
+import java.lang.ref.Reference;
+import java.lang.ref.SoftReference;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Factory for creating a test catalog that caches the last created catalog.
+ * This class is used in tests to verify the behavior of the caching catalog.
+ */
+public class HMSTestCatalogFactory extends HMSCatalogFactory {
+ static final AtomicReference<SoftReference<HMSTestCachingCatalog>> catRef = new AtomicReference<>(null);
+ /**
+ * Factory constructor.
+ * Called by the static method {@link HMSTestCatalogFactory#createServlet(Configuration)} that is
+ * declared in configuration and found through introspection.
+ *
+ * @param conf the configuration
+ */
+ protected HMSTestCatalogFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ protected Catalog cacheCatalog(HiveCatalog hiveCatalog) {
+ long expiry = MetastoreConf.getLongVar(configuration, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY);
+ HMSTestCachingCatalog cc = new HMSTestCachingCatalog(hiveCatalog, expiry);
+ catRef.set(new SoftReference<>(cc));
+ return cc;
+ }
+
+ public static HMSTestCachingCatalog getLastCatalog() {
+ Reference<HMSTestCachingCatalog> ref = catRef.get();
+ return ref.get();
+ }
+
+ public static void clearLastCatalog() {
+ catRef.set(null);
+ }
+
+ /**
+ * Creates the servlet instance.
+ * @return the servlet
+ */
+ public static ServletServerBuilder.Descriptor createServlet(Configuration configuration) {
+ HMSTestCatalogFactory hms = new HMSTestCatalogFactory(configuration);
+ HttpServlet servlet = hms.createServlet();
+ if (servlet != null) {
+ return new ServletServerBuilder.Descriptor(hms.getPort(), hms.getPath(), servlet);
+ }
+ return null;
+ }
+}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java
index 6b9a3f751472..6347e542d0bb 100644
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HiveRESTCatalogServerExtension.java
@@ -42,8 +42,8 @@ public class HiveRESTCatalogServerExtension implements BeforeAllCallback, Before
private final JwksServer jwksServer;
private final RESTCatalogServer restCatalogServer;
- private HiveRESTCatalogServerExtension(AuthType authType) {
- this.conf = MetastoreConf.newMetastoreConf();
+ private HiveRESTCatalogServerExtension(AuthType authType, Configuration configuration) {
+ this.conf = configuration == null ? MetastoreConf.newMetastoreConf() : configuration;
MetastoreConf.setVar(conf, ConfVars.CATALOG_SERVLET_AUTH, authType.name());
if (authType == AuthType.JWT) {
jwksServer = new JwksServer();
@@ -104,7 +104,10 @@ private Builder(AuthType authType) {
}
public HiveRESTCatalogServerExtension build() {
- return new HiveRESTCatalogServerExtension(authType);
+ return new HiveRESTCatalogServerExtension(authType, null);
+ }
+ public HiveRESTCatalogServerExtension build(Configuration configuration) {
+ return new HiveRESTCatalogServerExtension(authType, configuration);
}
}
From 308892f7e96c323900e0513c6a3e648f2ee539a1 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Thu, 14 Aug 2025 15:14:03 +0200
Subject: [PATCH 13/27] HIVE-29035 : renaming, addressing review comments;
---
.../iceberg/rest/HMSCachingCatalog.java | 73 ++++++++++---------
.../iceberg/rest/TestServerCatalogCache.java | 5 +-
.../rest/extension/HMSTestCachingCatalog.java | 10 +--
3 files changed, 48 insertions(+), 40 deletions(-)
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
index 9656469c1edf..ab98d7aa5a75 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
@@ -31,7 +31,6 @@
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
-import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
@@ -46,12 +45,11 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
/**
* Class that wraps an Iceberg Catalog to cache tables.
*/
public class HMSCachingCatalog extends CachingCatalog implements SupportsNamespaces, ViewCatalog {
- private static final Logger LOG = LoggerFactory.getLogger(HMSCachingCatalog.class);
+ protected static final Logger LOG = LoggerFactory.getLogger(HMSCachingCatalog.class);
protected final HiveCatalog hiveCatalog;
public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
@@ -69,41 +67,48 @@ public List listNamespaces(Namespace nmspc) throws NoSuchNamespaceExc
return hiveCatalog.listNamespaces(nmspc);
}
- protected void cacheInvalidateInc(TableIdentifier tid) {
- // This method is intentionally left empty. It can be overridden in subclasses if needed.
- }
-
- protected void cacheLoadInc(TableIdentifier tid) {
+ /**
+ * Callback when cache invalidates the entry for a given table identifier.
+ *
+ * @param tid the table identifier to invalidate
+ */
+ protected void onCacheInvalidate(TableIdentifier tid) {
// This method is intentionally left empty. It can be overridden in subclasses if needed.
}
- protected void cacheHitInc(TableIdentifier tid) {
+ /**
+ * Callback when cache loads a table for a given table identifier.
+ *
+ * @param tid the table identifier
+ */
+ protected void onCacheLoad(TableIdentifier tid) {
// This method is intentionally left empty. It can be overridden in subclasses if needed.
}
- protected void cacheMissInc(TableIdentifier tid) {
+ /**
+ * Callback when cache hit for a given table identifier.
+ *
+ * @param tid the table identifier
+ */
+ protected void onCacheHit(TableIdentifier tid) {
// This method is intentionally left empty. It can be overridden in subclasses if needed.
}
- protected void cacheMetaLoadInc(TableIdentifier tid) {
+ /**
+ * Callback when cache miss occurs for a given table identifier.
+ *
+ * @param tid the table identifier
+ */
+ protected void onCacheMiss(TableIdentifier tid) {
// This method is intentionally left empty. It can be overridden in subclasses if needed.
}
-
/**
- * Gets the metadata file location of a table.
+ * Callback when cache loads a metadata table for a given table identifier.
*
- * @param table the table
- * @return the location of the metadata file, or null if the table does not have a location
+ * @param tid the table identifier
*/
- protected static String getMetadataLocation(final Table table) {
- if (table instanceof HasTableOperations tableOps) {
- final TableOperations ops = tableOps.operations();
- final TableMetadata meta;
- if (ops != null && (meta = ops.current()) != null) {
- return meta.metadataFileLocation();
- }
- }
- return null;
+ protected void onCacheMetaLoad(TableIdentifier tid) {
+ // This method is intentionally left empty. It can be overridden in subclasses if needed.
}
@Override
@@ -117,21 +122,23 @@ public Table loadTable(final TableIdentifier identifier) {
if (location == null) {
LOG.debug("Table {} has no location, returning cached table without location", canonicalized);
} else {
- String cachedLocation = getMetadataLocation(cachedTable);
+ String cachedLocation = cachedTable instanceof HasTableOperations tableOps
+ ? tableOps.operations().current().metadataFileLocation()
+ : null;
if (!location.equals(cachedLocation)) {
- LOG.debug("Invalidate table {}, cached location {} != actual location {}", canonicalized, cachedLocation, location);
+ LOG.debug("Invalidate table {}, cached {} != actual {}", canonicalized, cachedLocation, location);
// Invalidate the cached table if the location is different
invalidateTable(canonicalized);
- cacheInvalidateInc(canonicalized);
+ onCacheInvalidate(canonicalized);
} else {
LOG.debug("Returning cached table: {}", canonicalized);
- cacheHitInc(canonicalized);
+ onCacheHit(canonicalized);
return cachedTable;
}
}
} else {
LOG.debug("Cache miss for table: {}", canonicalized);
- cacheMissInc(canonicalized);
+ onCacheMiss(canonicalized);
}
Table table = cache.get(canonicalized, catalog::loadTable);
if (table instanceof BaseMetadataTable) {
@@ -141,20 +148,20 @@ public Table loadTable(final TableIdentifier identifier) {
Table originTable = cache.get(originTableIdentifier, catalog::loadTable);
// Share TableOperations instance of origin table for all metadata tables, so that metadata
// table instances are refreshed as well when origin table instance is refreshed.
- if (originTable instanceof HasTableOperations originTableOps) {
- TableOperations ops = originTableOps.operations();
+ if (originTable instanceof HasTableOperations tableOps) {
+ TableOperations ops = tableOps.operations();
MetadataTableType type = MetadataTableType.from(canonicalized.name());
Table metadataTable =
MetadataTableUtils.createMetadataTableInstance(
ops, catalog.name(), originTableIdentifier, canonicalized, type);
cache.put(canonicalized, metadataTable);
- cacheMetaLoadInc(canonicalized);
+ onCacheMetaLoad(canonicalized);
LOG.debug("Loaded metadata table: {} for origin table: {}", canonicalized, originTableIdentifier);
// Return the metadata table instead of the original table
return metadataTable;
}
}
- cacheLoadInc(canonicalized);
+ onCacheLoad(canonicalized);
LOG.debug("Loaded table: {} ", canonicalized);
return table;
}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
index df7b2a10ca0b..6e10fde67e6c 100644
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestServerCatalogCache.java
@@ -32,6 +32,7 @@
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.Files;
+import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Namespace;
@@ -183,7 +184,9 @@ void testTableCache() {
}
private static String metadataLocation(Table table) {
- return HMSCachingCatalog.getMetadataLocation(table);
+ return table instanceof HasTableOperations tableOps
+ ? tableOps.operations().current().metadataFileLocation()
+ : null;
}
private void insertRows(Table table) throws IOException {
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
index 26c2ffc9446d..6fcffb6eaa28 100644
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/HMSTestCachingCatalog.java
@@ -32,7 +32,6 @@
* misses, invalidations, and loads.
*/
public class HMSTestCachingCatalog extends HMSCachingCatalog {
- private final HiveCatalog hiveCatalog;
protected final AtomicInteger cacheHitCount = new AtomicInteger(0);
protected final AtomicInteger cacheMissCount = new AtomicInteger(0);
protected final AtomicInteger cacheInvalidationCount = new AtomicInteger(0);
@@ -46,7 +45,6 @@ public class HMSTestCachingCatalog extends HMSCachingCatalog {
*/
public HMSTestCachingCatalog(HiveCatalog catalog, long expiration) {
super(catalog, expiration);
- this.hiveCatalog = catalog;
}
public Map getCacheMetrics() {
@@ -63,22 +61,22 @@ public HiveCatalog getHiveCatalog() {
}
@Override
- protected void cacheInvalidateInc(TableIdentifier tid) {
+ protected void onCacheInvalidate(TableIdentifier tid) {
cacheInvalidationCount.incrementAndGet();
}
@Override
- protected void cacheLoadInc(TableIdentifier tid) {
+ protected void onCacheLoad(TableIdentifier tid) {
cacheLoadCount.incrementAndGet();
}
@Override
- protected void cacheHitInc(TableIdentifier tid) {
+ protected void onCacheHit(TableIdentifier tid) {
cacheHitCount.incrementAndGet();
}
@Override
- protected void cacheMissInc(TableIdentifier tid) {
+ protected void onCacheMiss(TableIdentifier tid) {
cacheMissCount.incrementAndGet();
}
}
From ce5fb8aed38ff46ee4206655331cfcc4c192c660 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Mon, 30 Jun 2025 17:36:59 +0200
Subject: [PATCH 14/27] HIVE-29016: rebasing;
---
.../hive/metastore/conf/MetastoreConf.java | 5 +
.../iceberg/rest/HMSCachingCatalog.java | 23 +++++
.../iceberg/rest/HMSCatalogFactory.java | 33 +++++--
.../apache/iceberg/rest/HMSEventListener.java | 97 +++++++++++++++++++
.../rest/extension/RESTCatalogServer.java | 1 +
.../hadoop/hive/metastore/HiveMetaStore.java | 27 ++++++
.../hive/metastore/ServletSecurity.java | 10 ++
7 files changed, 189 insertions(+), 7 deletions(-)
create mode 100644 standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 91e68d7921a4..3f9eb0c4c38b 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -1884,6 +1884,11 @@ public enum ConfVars {
"hive.metastore.iceberg.catalog.cache.expiry", -1,
"HMS Iceberg Catalog cache expiry."
),
+ ICEBERG_CATALOG_EVENT_LISTENER_CLASS("hive.metastore.catalog.event.listener.class",
+ "hive.metastore.catalog.event.listener.class",
+ "org.apache.iceberg.rest.HMSEventListener",
+ "HMS Iceberg Catalog event listener class name."
+ ),
HTTPSERVER_THREADPOOL_MIN("hive.metastore.httpserver.threadpool.min",
"hive.metastore.httpserver.threadpool.min", 8,
"HMS embedded HTTP server minimum number of threads."
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
index edb5fbd41a9b..93c905a36e65 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java
@@ -35,12 +35,15 @@
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.view.View;
import org.apache.iceberg.view.ViewBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Class that wraps an Iceberg Catalog to cache tables.
*/
public class HMSCachingCatalog extends CachingCatalog implements SupportsNamespaces, ViewCatalog {
+ private static final Logger LOG = LoggerFactory.getLogger(HMSCachingCatalog.class);
private final HiveCatalog hiveCatalog;
public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
@@ -92,6 +95,26 @@ public boolean namespaceExists(Namespace namespace) {
return hiveCatalog.namespaceExists(namespace);
}
+ public void invalidateTable(String dbName, String tableName) {
+ super.invalidateTable(TableIdentifier.of(dbName, tableName));
+ }
+
+ @Override
+ public void invalidateTable(TableIdentifier tableIdentifier) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Invalidating table: {}", tableIdentifier);
+ }
+ super.invalidateTable(tableIdentifier);
+ }
+
+
+ public void invalidateNamespace(String namespace) {
+ Namespace ns = Namespace.of(namespace);
+ for (TableIdentifier table : listTables(ns)) {
+ invalidateTable(table);
+ }
+ }
+
@Override
 public List<TableIdentifier> listViews(Namespace namespace) {
return hiveCatalog.listViews(namespace);
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
index 682e7c9e2649..862289da4705 100644
--- a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogFactory.java
@@ -18,8 +18,11 @@
*/
package org.apache.iceberg.rest;
+import java.lang.ref.Reference;
+import java.lang.ref.SoftReference;
import java.util.Map;
import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicReference;
import javax.servlet.http.HttpServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.ServletSecurity;
@@ -37,9 +40,23 @@
public class HMSCatalogFactory {
private static final String SERVLET_ID_KEY = "metastore.in.test.iceberg.catalog.servlet.id";
- private final Configuration configuration;
- private final int port;
- private final String path;
+ /**
+ * Convenience soft reference to last catalog.
+ */
+ protected static final AtomicReference<SoftReference<Catalog>> catalogRef = new AtomicReference<>();
+
+ public static Catalog getLastCatalog() {
+ Reference<Catalog> soft = catalogRef.get();
+ return soft != null ? soft.get() : null;
+ }
+
+ protected static void setLastCatalog(Catalog catalog) {
+ catalogRef.set(new SoftReference<>(catalog));
+ }
+
+ protected final Configuration configuration;
+ protected final int port;
+ protected final String path;
/**
* Factory constructor.
@@ -65,7 +82,7 @@ public String getPath() {
* Creates the catalog instance.
* @return the catalog
*/
- private Catalog createCatalog() {
+ protected Catalog createCatalog() {
 final Map<String, String> properties = new TreeMap<>();
MetastoreConf.setVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS, "");
final String configUri = MetastoreConf.getVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS);
@@ -98,7 +115,7 @@ private Catalog createCatalog() {
* @param catalog the Iceberg catalog
* @return the servlet
*/
- private HttpServlet createServlet(Catalog catalog) {
+ protected HttpServlet createServlet(Catalog catalog) {
String authType = MetastoreConf.getVar(configuration, ConfVars.CATALOG_SERVLET_AUTH);
ServletSecurity security = new ServletSecurity(AuthType.fromString(authType), configuration);
return security.proxy(new HMSCatalogServlet(new HMSCatalogAdapter(catalog)));
@@ -108,9 +125,11 @@ private HttpServlet createServlet(Catalog catalog) {
* Creates the REST catalog servlet instance.
* @return the servlet
*/
- private HttpServlet createServlet() {
+ protected HttpServlet createServlet() {
if (port >= 0 && path != null && !path.isEmpty()) {
- return createServlet(createCatalog());
+ Catalog actualCatalog = createCatalog();
+ setLastCatalog(actualCatalog);
+ return createServlet(actualCatalog);
}
return null;
}
diff --git a/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
new file mode 100644
index 000000000000..256b24ffe253
--- /dev/null
+++ b/standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSEventListener.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ReloadEvent;
+import org.apache.iceberg.catalog.Catalog;
+import org.slf4j.Logger;
+
+/**
+ * HMSEventListener is a Hive Metastore event listener that invalidates the cache
+ * of the HMSCachingCatalog when certain events occur, such as altering or dropping a table.
+ */
+public class HMSEventListener extends MetaStoreEventListener {
+ private static final Logger LOG = org.slf4j.LoggerFactory.getLogger(HMSEventListener.class);
+ /**
+ * Constructor for HMSEventListener.
+ *
+ * @param config the configuration to use for the listener
+ */
+ public HMSEventListener(Configuration config) {
+ super(config);
+ }
+
+
+ private Catalog getCatalog() {
+ return HMSCatalogFactory.getLastCatalog();
+ }
+
+ @Override
+ public void onAlterTable(AlterTableEvent event) {
+ Catalog catalog = getCatalog();
+ if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
+ String dbName = event.getOldTable().getDbName();
+ String tableName = event.getOldTable().getTableName();
+ LOG.debug("onAlterTable: invalidating table cache for {}.{}", dbName, tableName);
+ hmsCachingCatalog.invalidateTable(dbName, tableName);
+ }
+ }
+
+ @Override
+ public void onDropTable(DropTableEvent event) {
+ Catalog catalog = getCatalog();
+ if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
+ String dbName = event.getTable().getDbName();
+ String tableName = event.getTable().getTableName();
+ LOG.debug("onDropTable: invalidating table cache for {}.{}", dbName, tableName);
+ hmsCachingCatalog.invalidateTable(dbName, tableName);
+ }
+ }
+
+ @Override
+ public void onReload(ReloadEvent reloadEvent) {
+ Catalog catalog = getCatalog();
+ if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
+ Table tableObj = reloadEvent.getTableObj();
+ String dbName = tableObj.getDbName();
+ String tableName = tableObj.getTableName();
+ LOG.debug("onReload: invalidating table cache for {}.{}", dbName, tableName);
+ hmsCachingCatalog.invalidateTable(dbName, tableName);
+ }
+ }
+
+ @Override
+ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
+ Catalog catalog = getCatalog();
+ if (catalog instanceof HMSCachingCatalog hmsCachingCatalog) {
+ String dbName = dbEvent.getDatabase().getName();
+ LOG.debug("onDropDatabase: invalidating tables cache for {}", dbName);
+ hmsCachingCatalog.invalidateNamespace(dbName);
+ }
+ }
+
+}
diff --git a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
index 7d2aac692db5..2bae896cd6ed 100644
--- a/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
+++ b/standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/extension/RESTCatalogServer.java
@@ -41,6 +41,7 @@ public class RESTCatalogServer {
private static int createMetastoreServerWithRESTCatalog(int restPort, Configuration conf) throws Exception {
MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT, restPort);
+ MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY, 1000L);
return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf,
true, false, false, false);
}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index e0368373a60c..3f489713aae7 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -658,6 +658,31 @@ private static void constraintHttpMethods(ServletContextHandler ctxHandler, bool
}
ctxHandler.setSecurityHandler(securityHandler);
}
+
+ /**
+ * Configure the metastore to propagate events to eventual Iceberg catalog.
+ * @param conf the configuration
+ */
+ private static void configureIcebergCacheHandling(Configuration conf) {
+ // If we start a REST catalog, we need to listen to events to maintain its consistency.
+ String eventListenerClass = MetastoreConf.getVar(conf, ConfVars.ICEBERG_CATALOG_EVENT_LISTENER_CLASS);
+ if (eventListenerClass != null && !eventListenerClass.isEmpty()) {
+ // if expiry is negative, no cache is used, so no need to register the listener
+ long expiry = MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY);
+ // if the port is negative, no REST catalog is configured, so no need to register the listener
+ int icebergPort = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.CATALOG_SERVLET_PORT);
+ if (icebergPort >= 0 && expiry > 0) {
+ LOG.info("Configuring Iceberg catalog event listener: {}", eventListenerClass);
+ String listeners = MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS);
+ if (listeners == null || listeners.isEmpty()) {
+ MetastoreConf.setVar(conf, ConfVars.EVENT_LISTENERS, eventListenerClass);
+ } else {
+ MetastoreConf.setVar(conf, ConfVars.EVENT_LISTENERS, listeners + "," + eventListenerClass);
+ }
+ }
+ }
+ }
+
/**
* Start Metastore based on a passed {@link HadoopThriftAuthBridge}.
*
@@ -672,6 +697,8 @@ private static void constraintHttpMethods(ServletContextHandler ctxHandler, bool
*/
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge,
Configuration conf, boolean startMetaStoreThreads, AtomicBoolean startedBackgroundThreads) throws Throwable {
+ // If we start an Iceberg REST catalog, we need to listen to events to maintain its consistency.
+ configureIcebergCacheHandling(conf);
isMetaStoreRemote = true;
if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.USE_THRIFT_SASL) &&
MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.THRIFT_ZOOKEEPER_USE_KERBEROS)) {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ServletSecurity.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ServletSecurity.java
index 22985875f223..186d10f568f2 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ServletSecurity.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ServletSecurity.java
@@ -19,6 +19,7 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.auth.HttpAuthenticationException;
import org.apache.hadoop.hive.metastore.auth.jwt.JWTValidator;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -238,6 +239,15 @@ public void execute(HttpServletRequest request, HttpServletResponse response, Me
Thread.currentThread().interrupt();
} catch (RuntimeException e) {
throw new IOException("Exception when executing http request as user: "+ clientUgi.getUserName(), e);
+ } finally {
+ try {
+ FileSystem.closeAllForUGI(clientUgi);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Successfully cleaned up FileSystem handles for user: {}", clientUgi.getUserName());
+ }
+ } catch (IOException cleanupException) {
+ LOG.error("Failed to clean up FileSystem handles for UGI: {}", clientUgi, cleanupException);
+ }
}
}
From e51cbd2be34c361218940ce305167706c33db7f0 Mon Sep 17 00:00:00 2001
From: Henrib
Date: Fri, 20 Jun 2025 15:23:00 +0200
Subject: [PATCH 15/27] HIVE-29016: fixing cache handling for REST catalog; -
add an event listener to invalidate cached tables impervious to source of
change (direct HMS or REST); - added configuration option for event class
handler; - lengthened default cache TTL;
---
.../metastore-rest-catalog/pom.xml | 6 +
.../iceberg/rest/HMSCachingCatalog.java | 19 +-
.../apache/iceberg/rest/TestHMSCatalog.java | 265 ++++++++++++++++++
3 files changed, 284 insertions(+), 6 deletions(-)
create mode 100644 standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCatalog.java
diff --git a/standalone-metastore/metastore-rest-catalog/pom.xml b/standalone-metastore/metastore-rest-catalog/pom.xml
index edf41fdc17a7..8d7a24bbe2ea 100644
--- a/standalone-metastore/metastore-rest-catalog/pom.xml
+++ b/standalone-metastore/metastore-rest-catalog/pom.xml
@@ -84,6 +84,12 @@
3.27.3
test