diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java index d2c4fb7acbd88a..80444f45657582 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java @@ -21,9 +21,11 @@ import org.apache.doris.datasource.property.metastore.MetastoreProperties; import org.apache.doris.datasource.property.storage.StorageProperties; +import com.aliyun.odps.table.utils.Preconditions; import com.google.common.collect.Maps; import com.google.gson.annotations.SerializedName; import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -127,7 +129,8 @@ public Map getStoragePropertiesMap() .collect(Collectors.toMap(StorageProperties::getType, Function.identity())); } catch (UserException e) { LOG.warn("Failed to initialize catalog storage properties", e); - throw new RuntimeException("Failed to initialize storage properties for catalog", e); + throw new RuntimeException("Failed to initialize storage properties, error: " + + ExceptionUtils.getRootCauseMessage(e), e); } } } @@ -135,6 +138,25 @@ public Map getStoragePropertiesMap() return storagePropertiesMap; } + public void checkMetaStoreAndStorageProperties(Class msClass) { + MetastoreProperties msProperties; + List storageProperties; + try { + msProperties = MetastoreProperties.create(getProperties()); + storageProperties = StorageProperties.createAll(getProperties()); + } catch (UserException e) { + throw new RuntimeException("Failed to initialize Catalog properties, error: " + + ExceptionUtils.getRootCauseMessage(e), e); + } + Preconditions.checkNotNull(storageProperties, + "Storage properties are not configured properly"); + Preconditions.checkNotNull(msProperties, "Metastore properties are not configured properly"); + Preconditions.checkArgument( + msClass.isInstance(msProperties), + String.format("Metastore properties type is not correct. 
Expected %s but got %s", + msClass.getName(), msProperties.getClass().getName())); + } + /** * Get metastore properties with lazy loading, using double-check locking to ensure thread safety */ @@ -150,7 +172,8 @@ public MetastoreProperties getMetastoreProperties() { metastoreProperties = MetastoreProperties.create(getProperties()); } catch (UserException e) { LOG.warn("Failed to create metastore properties", e); - throw new RuntimeException("Failed to create metastore properties", e); + throw new RuntimeException("Failed to create metastore properties, error: " + + ExceptionUtils.getRootCauseMessage(e), e); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java index 527abdb207234b..3a9c90568f7537 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java @@ -18,11 +18,9 @@ package org.apache.doris.datasource.hive; import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.HdfsResource; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.DdlException; import org.apache.doris.common.ThreadPoolManager; -import org.apache.doris.common.UserException; import org.apache.doris.common.util.Util; import org.apache.doris.datasource.CatalogProperty; import org.apache.doris.datasource.ExternalCatalog; @@ -34,14 +32,12 @@ import org.apache.doris.datasource.iceberg.IcebergUtils; import org.apache.doris.datasource.operations.ExternalMetadataOperations; import org.apache.doris.datasource.property.metastore.AbstractHMSProperties; -import org.apache.doris.datasource.property.metastore.MetastoreProperties; import org.apache.doris.fs.FileSystemProvider; import org.apache.doris.fs.FileSystemProviderImpl; import org.apache.doris.fs.remote.dfs.DFSFileSystem; import org.apache.doris.transaction.TransactionManagerFactory; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; import org.apache.commons.lang3.math.NumberUtils; import org.apache.iceberg.hive.HiveCatalog; import org.apache.logging.log4j.LogManager; @@ -94,14 +90,6 @@ public AbstractHMSProperties getHmsProperties() { return hmsProperties; } - private void initAndCheckHmsProperties() { - try { - this.hmsProperties = (AbstractHMSProperties) MetastoreProperties.create(catalogProperty.getProperties()); - } catch (UserException e) { - throw new RuntimeException("Failed to create HMSProperties from catalog properties", e); - } - } - @VisibleForTesting public HMSExternalCatalog() { catalogProperty = new CatalogProperty(null, null); @@ -134,41 +122,7 @@ public void checkProperties() throws DdlException { throw new DdlException( "The parameter " + PARTITION_CACHE_TTL_SECOND + " is wrong, value is " + partitionCacheTtlSecond); } - - // check the dfs.ha properties - // 'dfs.nameservices'='your-nameservice', - // 'dfs.ha.namenodes.your-nameservice'='nn1,nn2', - // 'dfs.namenode.rpc-address.your-nameservice.nn1'='172.21.0.2:4007', - // 'dfs.namenode.rpc-address.your-nameservice.nn2'='172.21.0.3:4007', - // 'dfs.client.failover.proxy.provider.your-nameservice'='xxx' - String dfsNameservices = catalogProperty.getOrDefault(HdfsResource.DSF_NAMESERVICES, ""); - if (Strings.isNullOrEmpty(dfsNameservices)) { - return; - } - - String[] nameservices = dfsNameservices.split(","); - for (String dfsservice : nameservices) { - String namenodes = 
catalogProperty.getOrDefault("dfs.ha.namenodes." + dfsservice, ""); - if (Strings.isNullOrEmpty(namenodes)) { - throw new DdlException("Missing dfs.ha.namenodes." + dfsservice + " property"); - } - String[] names = namenodes.split(","); - for (String name : names) { - String address = catalogProperty.getOrDefault("dfs.namenode.rpc-address." + dfsservice + "." + name, - ""); - if (Strings.isNullOrEmpty(address)) { - throw new DdlException( - "Missing dfs.namenode.rpc-address." + dfsservice + "." + name + " property"); - } - } - String failoverProvider = catalogProperty.getOrDefault("dfs.client.failover.proxy.provider." + dfsservice, - ""); - if (Strings.isNullOrEmpty(failoverProvider)) { - throw new DdlException( - "Missing dfs.client.failover.proxy.provider." + dfsservice + " property"); - } - } - initAndCheckHmsProperties(); + catalogProperty.checkMetaStoreAndStorageProperties(AbstractHMSProperties.class); } @Override @@ -180,7 +134,7 @@ protected synchronized void initPreExecutionAuthenticator() { @Override protected void initLocalObjectsImpl() { - initAndCheckHmsProperties(); + this.hmsProperties = (AbstractHMSProperties) catalogProperty.getMetastoreProperties(); initPreExecutionAuthenticator(); HiveMetadataOps hiveOps = ExternalMetadataOperations.newHiveMetadataOps(hmsProperties.getHiveConf(), this); threadPoolWithPreAuth = ThreadPoolManager.newDaemonFixedThreadPoolWithPreAuth( @@ -190,7 +144,7 @@ protected void initLocalObjectsImpl() { true, executionAuthenticator); FileSystemProvider fileSystemProvider = new FileSystemProviderImpl(Env.getCurrentEnv().getExtMetaCacheMgr(), - this.catalogProperty.getStoragePropertiesMap()); + this.catalogProperty.getStoragePropertiesMap()); this.fileSystemExecutor = ThreadPoolManager.newDaemonFixedThreadPool(FILE_SYSTEM_EXECUTOR_THREAD_NUM, Integer.MAX_VALUE, String.format("hms_committer_%s_file_system_executor_pool", name), true); transactionManager = TransactionManagerFactory.createHiveTransactionManager(hiveOps, fileSystemProvider, diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java index b1178363b9b245..f1a655456e013e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java @@ -17,14 +17,13 @@ package org.apache.doris.datasource.iceberg; +import org.apache.doris.common.DdlException; import org.apache.doris.common.ThreadPoolManager; -import org.apache.doris.common.UserException; import org.apache.doris.datasource.ExternalCatalog; import org.apache.doris.datasource.InitCatalogLog; import org.apache.doris.datasource.SessionContext; import org.apache.doris.datasource.operations.ExternalMetadataOperations; import org.apache.doris.datasource.property.metastore.AbstractIcebergProperties; -import org.apache.doris.datasource.property.metastore.MetastoreProperties; import org.apache.doris.transaction.TransactionManagerFactory; import org.apache.iceberg.catalog.Catalog; @@ -54,14 +53,11 @@ public IcebergExternalCatalog(long catalogId, String name, String comment) { // Create catalog based on catalog type protected void initCatalog() { try { - msProperties = (AbstractIcebergProperties) MetastoreProperties - .create(getProperties()); + msProperties = (AbstractIcebergProperties) catalogProperty.getMetastoreProperties(); this.catalog = msProperties.initializeCatalog(getName(), new 
ArrayList<>(catalogProperty .getStoragePropertiesMap().values())); this.icebergCatalogType = msProperties.getIcebergCatalogType(); - } catch (UserException e) { - throw new RuntimeException("Failed to initialize Iceberg catalog: " + e.getMessage(), e); } catch (ClassCastException e) { throw new RuntimeException("Invalid properties for Iceberg catalog: " + getProperties(), e); } catch (Exception e) { @@ -69,6 +65,12 @@ protected void initCatalog() { } } + @Override + public void checkProperties() throws DdlException { + super.checkProperties(); + catalogProperty.checkMetaStoreAndStorageProperties(AbstractIcebergProperties.class); + } + @Override protected synchronized void initPreExecutionAuthenticator() { if (executionAuthenticator == null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java index 7cd6fa0f2c8fd6..76e093656bbc84 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java @@ -18,14 +18,12 @@ package org.apache.doris.datasource.paimon; import org.apache.doris.common.DdlException; -import org.apache.doris.common.UserException; import org.apache.doris.datasource.CatalogProperty; import org.apache.doris.datasource.ExternalCatalog; import org.apache.doris.datasource.InitCatalogLog; import org.apache.doris.datasource.NameMapping; import org.apache.doris.datasource.SessionContext; import org.apache.doris.datasource.property.metastore.AbstractPaimonProperties; -import org.apache.doris.datasource.property.metastore.MetastoreProperties; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.logging.log4j.LogManager; @@ -60,12 +58,7 @@ public PaimonExternalCatalog(long catalogId, String name, String resource, Map getPaimonOptionsMap() { @Override public void checkProperties() throws DdlException { - if (null != paimonProperties) { - try { - this.paimonProperties = (AbstractPaimonProperties) MetastoreProperties - .create(catalogProperty.getProperties()); - } catch (UserException e) { - throw new DdlException("Failed to create Paimon properties from catalog properties, exception: " - + ExceptionUtils.getRootCauseMessage(e), e); - } - } + super.checkProperties(); + catalogProperty.checkMetaStoreAndStorageProperties(AbstractPaimonProperties.class); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java index c1b8a7b872475b..99c170f8ce24bc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java @@ -119,6 +119,7 @@ public void initNormalizeAndCheckProps() { initBackendConfigProperties(); this.hadoopStorageConfig = new Configuration(); this.backendConfigProperties.forEach(hadoopStorageConfig::set); + HdfsPropertiesUtils.checkHaConfig(backendConfigProperties); hadoopAuthenticator = HadoopAuthenticator.getHadoopAuthenticator(hadoopStorageConfig); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java index 11662a8e51b722..7fc091daa648b4 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java @@ -20,7 +20,9 @@ import org.apache.doris.common.UserException; import org.apache.doris.datasource.property.storage.exception.StoragePropertiesException; +import com.google.common.base.Strings; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import java.io.UnsupportedEncodingException; import java.net.URI; @@ -28,8 +30,12 @@ import java.net.URLDecoder; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; public class HdfsPropertiesUtils { private static final String URI_KEY = "uri"; @@ -127,7 +133,7 @@ public static String validateAndNormalizeUri(String location, Set suppor } public static String validateAndNormalizeUri(String location, String host, String defaultFs, - Set supportedSchemas) { + Set supportedSchemas) { if (StringUtils.isBlank(location)) { throw new IllegalArgumentException("Property 'uri' is required."); } @@ -179,4 +185,76 @@ public static String validateAndNormalizeUri(String location, String host, Strin throw new StoragePropertiesException("Failed to parse URI: " + location, e); } } + + /** + * Validate the required HDFS HA configuration properties. + * + *

<p>This method checks the following:
+     * <ul>
+     *   <li>{@code dfs.nameservices} must be defined if HA is enabled.</li>
+     *   <li>{@code dfs.ha.namenodes.<nameservice>} must be defined and contain at least 2 namenodes.</li>
+     *   <li>For each namenode, {@code dfs.namenode.rpc-address.<nameservice>.<namenode>} must be defined.</li>
+     *   <li>{@code dfs.client.failover.proxy.provider.<nameservice>} must be defined.</li>
+     * </ul>
+ * + * @param hdfsProperties configuration map (similar to core-site.xml/hdfs-site.xml properties) + */ + public static void checkHaConfig(Map hdfsProperties) { + if (hdfsProperties == null) { + return; + } + // 1. Check dfs.nameservices + String dfsNameservices = hdfsProperties.getOrDefault(HdfsClientConfigKeys.DFS_NAMESERVICES, ""); + if (Strings.isNullOrEmpty(dfsNameservices)) { + // No nameservice configured => HA is not enabled, nothing to validate + return; + } + for (String dfsservice : splitAndTrim(dfsNameservices)) { + if (dfsservice.isEmpty()) { + continue; + } + // 2. Check dfs.ha.namenodes. + String haNnKey = HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + dfsservice; + String namenodes = hdfsProperties.getOrDefault(haNnKey, ""); + if (Strings.isNullOrEmpty(namenodes)) { + throw new IllegalArgumentException("Missing property: " + haNnKey); + } + List names = splitAndTrim(namenodes); + if (names.size() < 2) { + throw new IllegalArgumentException("HA requires at least 2 namenodes for service: " + dfsservice); + } + // 3. Check dfs.namenode.rpc-address.. + for (String name : names) { + String rpcKey = HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + dfsservice + "." + name; + String address = hdfsProperties.getOrDefault(rpcKey, ""); + if (Strings.isNullOrEmpty(address)) { + throw new IllegalArgumentException("Missing property: " + rpcKey + " (expected format: host:port)"); + } + } + // 4. Check dfs.client.failover.proxy.provider. + String failoverKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + dfsservice; + String failoverProvider = hdfsProperties.getOrDefault(failoverKey, ""); + if (Strings.isNullOrEmpty(failoverProvider)) { + throw new IllegalArgumentException("Missing property: " + failoverKey); + } + } + } + + /** + * Utility method to split a comma-separated string, trim whitespace, + * and remove empty tokens. 
+ * + * @param s the input string + * @return list of trimmed non-empty values + */ + private static List splitAndTrim(String s) { + if (Strings.isNullOrEmpty(s)) { + return Collections.emptyList(); + } + return Arrays.stream(s.split(",")) + .map(String::trim) + .filter(tok -> !tok.isEmpty()) + .collect(Collectors.toList()); + } } + diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java index 9a5ada45bbb208..a896931babeedc 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java @@ -38,7 +38,14 @@ public class LocationPathTest { static { Map props = new HashMap<>(); - props.put("dfs.nameservices", "namenode:8020"); + props.put("dfs.nameservices", "ns1"); + // NameNodes for this nameservice + props.put("dfs.ha.namenodes.ns1", "nn1,nn2"); + // RPC addresses for each NameNode + props.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020"); + props.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020"); + props.put("dfs.client.failover.proxy.provider.ns1", + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); props.put("s3.endpoint", "s3.us-east-2.amazonaws.com"); props.put("s3.access_key", "access_key"); props.put("s3.secret_key", "secret_key"); @@ -74,7 +81,14 @@ public void testHdfsLocationConvert() throws UserException { // HA props Map props = new HashMap<>(); - props.put("dfs.nameservices", "ns"); + props.put("dfs.nameservices", "ns1"); + // NameNodes for this nameservice + props.put("dfs.ha.namenodes.ns1", "nn1,nn2"); + // RPC addresses for each NameNode + props.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020"); + props.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020"); + props.put("dfs.client.failover.proxy.provider.ns1", + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); //HdfsProperties hdfsProperties = (HdfsProperties) StorageProperties.createPrimary( props); Map storagePropertiesMap = StorageProperties.createAll(props).stream() .collect(java.util.stream.Collectors.toMap(StorageProperties::getType, Function.identity())); @@ -231,7 +245,7 @@ public void testLocationProperties() { assertNormalize("cosn://bucket/path/to/file", "s3://bucket/path/to/file"); assertNormalize("viewfs://cluster/path/to/file", "viewfs://cluster/path/to/file"); assertNormalize("/path/to/file", "hdfs://namenode:8020/path/to/file"); - assertNormalize("hdfs:///path/to/file", "hdfs://namenode:8020/path/to/file"); + assertNormalize("hdfs:///path/to/file", "hdfs://ns1/path/to/file"); } private void assertNormalize(String input, String expected) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java index c634ad64592292..01e430fb84118f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java @@ -76,6 +76,10 @@ public void testBasicHdfsPropertiesCreateByConfigFile() throws UserException { // Test 3: Valid config resources (should succeed) origProps.put("hadoop.config.resources", "hadoop1/core-site.xml,hadoop1/hdfs-site.xml"); + origProps.put("dfs.ha.namenodes.ns1", "nn1,nn2"); + origProps.put("dfs.namenode.rpc-address.ns1.nn1", 
"localhost:9000"); + origProps.put("dfs.namenode.rpc-address.ns1.nn2", "localhost:9001"); + origProps.put("dfs.client.failover.proxy.provider.ns1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); List storageProperties = StorageProperties.createAll(origProps); HdfsProperties hdfsProperties = (HdfsProperties) storageProperties.get(0); Configuration conf = hdfsProperties.getHadoopStorageConfig(); @@ -129,6 +133,8 @@ public void testBasicCreateByOldProperties() throws UserException { origProps.put("dfs.nameservices", "ns1"); origProps.put("dfs.ha.namenodes.ns1", "nn1,nn2"); origProps.put("dfs.namenode.rpc-address.ns1.nn1", "localhost:9000"); + origProps.put("dfs.namenode.rpc-address.ns1.nn2", "localhost:9001"); + origProps.put("dfs.client.failover.proxy.provider.ns1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); origProps.put("hadoop.async.threads.max", "10"); properties = StorageProperties.createAll(origProps).get(0); Assertions.assertEquals(properties.getClass(), HdfsProperties.class); diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java index 36378f4023ac9d..eed7360206b1b9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java @@ -145,4 +145,74 @@ public void testConvertUrlToFilePath_uppercaseSchema() throws Exception { String result = HdfsPropertiesUtils.convertUrlToFilePath(uri, "", supportSchema); Assertions.assertEquals("HDFS://localhost:9000/test", result); } + + @Test + public void testValidHaConfig() { + Map config = new HashMap<>(); + config.put("dfs.nameservices", "ns1"); + config.put("dfs.ha.namenodes.ns1", "nn1,nn2"); + config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020"); + config.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020"); + config.put("dfs.client.failover.proxy.provider.ns1", + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); + + // Valid HA configuration should pass without exception + Assertions.assertDoesNotThrow(() -> HdfsPropertiesUtils.checkHaConfig(config)); + } + + @Test + public void testNoNameservices() { + Map config = new HashMap<>(); + // No dfs.nameservices configured → not HA mode, should not throw + Assertions.assertDoesNotThrow(() -> HdfsPropertiesUtils.checkHaConfig(config)); + } + + @Test + public void testMissingHaNamenodes() { + Map config = new HashMap<>(); + config.put("dfs.nameservices", "ns1"); + // dfs.ha.namenodes.ns1 missing + IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class, + () -> HdfsPropertiesUtils.checkHaConfig(config)); + Assertions.assertTrue(ex.getMessage().contains("dfs.ha.namenodes.ns1")); + } + + @Test + public void testNotEnoughNamenodes() { + Map config = new HashMap<>(); + config.put("dfs.nameservices", "ns1"); + config.put("dfs.ha.namenodes.ns1", "nn1"); // Only 1 namenode + IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class, + () -> HdfsPropertiesUtils.checkHaConfig(config)); + Assertions.assertTrue(ex.getMessage().contains("HA requires at least 2 namenodes")); + } + + @Test + public void testMissingRpcAddress() { + Map config = new HashMap<>(); + config.put("dfs.nameservices", "ns1"); + config.put("dfs.ha.namenodes.ns1", 
"nn1,nn2"); + config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020"); + // nn2 rpc-address missing + config.put("dfs.client.failover.proxy.provider.ns1", + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); + + IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class, + () -> HdfsPropertiesUtils.checkHaConfig(config)); + Assertions.assertTrue(ex.getMessage().contains("dfs.namenode.rpc-address.ns1.nn2")); + } + + @Test + public void testMissingFailoverProvider() { + Map config = new HashMap<>(); + config.put("dfs.nameservices", "ns1"); + config.put("dfs.ha.namenodes.ns1", "nn1,nn2"); + config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020"); + config.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020"); + // failover provider missing + + IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class, + () -> HdfsPropertiesUtils.checkHaConfig(config)); + Assertions.assertTrue(ex.getMessage().contains("dfs.client.failover.proxy.provider.ns1")); + } } diff --git a/regression-test/suites/auth_call/test_assistant_command_auth.groovy b/regression-test/suites/auth_call/test_assistant_command_auth.groovy index 1f150d196e13af..c6b8b2d68faf59 100644 --- a/regression-test/suites/auth_call/test_assistant_command_auth.groovy +++ b/regression-test/suites/auth_call/test_assistant_command_auth.groovy @@ -56,7 +56,9 @@ suite("test_assistant_command_auth","p0,auth_call") { logger.info("insert_res: " + insert_res) sql """create catalog if not exists ${catalogName} properties ( - 'type'='hms' + 'type'='hms', + 'hive.metastore.uris' = 'thrift://127.0.0.1:9083' + );""" diff --git a/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy index 89ad3b4c1043d0..f2f6a97cb4887d 100644 --- a/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy +++ b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy @@ -25,7 +25,8 @@ suite("test_ddl_catalog_auth","p0,auth_call") { String catalogNameOther = 'test_ddl_catalog_auth_catalog_other' sql """create catalog if not exists ${catalogNameOther} properties ( - 'type'='hms' + 'type'='hms', + 'hive.metastore.uris'='thrift://127.0.0.1:9083' );""" try_sql("DROP USER ${user}") @@ -46,7 +47,8 @@ suite("test_ddl_catalog_auth","p0,auth_call") { connect(user, "${pwd}", context.config.jdbcUrl) { test { sql """create catalog if not exists ${catalogName} properties ( - 'type'='hms' + 'type'='hms', + 'hive.metastore.uris'='thrift://127.0.0.1:9083' );""" exception "denied" } @@ -54,13 +56,15 @@ suite("test_ddl_catalog_auth","p0,auth_call") { assertTrue(ctl_res.size() == 1) } sql """create catalog if not exists ${catalogName} properties ( - 'type'='hms' + 'type'='hms', + 'hive.metastore.uris'='thrift://127.0.0.1:9083' );""" sql """grant Create_priv on ${catalogName}.*.* to ${user}""" sql """drop catalog ${catalogName}""" connect(user, "${pwd}", context.config.jdbcUrl) { sql """create catalog if not exists ${catalogName} properties ( - 'type'='hms' + 'type'='hms', + 'hive.metastore.uris'='thrift://127.0.0.1:9083' );""" sql """show create catalog ${catalogName}""" def ctl_res = sql """show catalogs;"""