Search before asking
I have searched in the issues and found no similar issues.
What would you like to be improved?
When the Hive table backing a Mixed Hive table has already been deleted, dropping the Mixed Hive table throws an exception. It should not throw; instead, the table drop logic should execute normally.
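A minimal reproduction sketch (the catalog name `arctic_catalog` and table `db.demo` are placeholders, and the session is assumed to be configured with `spark.sql.catalog.arctic_catalog=com.netease.arctic.spark.ArcticSparkCatalog`):

```java
import org.apache.spark.sql.SparkSession;

public class DropMixedHiveTableRepro {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .appName("drop-mixed-hive-table-repro")
        .enableHiveSupport()
        .getOrCreate();

    // Precondition: db.demo is a Mixed Hive table whose backing Hive table
    // was already dropped directly in the Hive metastore.

    // Analysis of this statement calls ArcticSparkCatalog.loadTable, which runs
    // HiveMetaSynchronizer.syncHiveSchemaToArctic and throws before the drop
    // logic ever executes.
    spark.sql("DROP TABLE arctic_catalog.db.demo");
  }
}
```

Running the DROP statement against such a table fails with the stack trace below: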
```
java.lang.RuntimeException: Failed to get hive table:xxxxx.xxxx_xxxxx
at com.netease.arctic.hive.utils.HiveMetaSynchronizer.syncHiveSchemaToArctic(HiveMetaSynchronizer.java:88)
at com.netease.arctic.hive.table.KeyedHiveTable.syncHiveSchemaToArctic(KeyedHiveTable.java:94)
at com.netease.arctic.hive.table.KeyedHiveTable.<init>(KeyedHiveTable.java:60)
at com.netease.arctic.hive.catalog.ArcticHiveCatalog.loadKeyedTable(ArcticHiveCatalog.java:180)
at com.netease.arctic.hive.catalog.ArcticHiveCatalog.loadKeyedTable(ArcticHiveCatalog.java:68)
at com.netease.arctic.catalog.BasicArcticCatalog.loadTableByMeta(BasicArcticCatalog.java:178)
at com.netease.arctic.catalog.BasicArcticCatalog.loadTable(BasicArcticCatalog.java:173)
at com.netease.arctic.spark.ArcticSparkCatalog.loadTable(ArcticSparkCatalog.java:142)
at org.apache.spark.sql.connector.catalog.CatalogV2Util$.loadTable(CatalogV2Util.scala:282)
at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables$$anonfun$apply$9.applyOrElse(Analyzer.scala:1013)
at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables$$anonfun$apply$9.applyOrElse(Analyzer.scala:996)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUp$3(AnalysisHelper.scala:90)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:73)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUp$1(AnalysisHelper.scala:90)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:221)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:86)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:84)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUp$2(AnalysisHelper.scala:87)
at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$mapChildren$1(TreeNode.scala:407)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:243)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:405)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:358)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUp$1(AnalysisHelper.scala:87)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:221)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:86)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp$(AnalysisHelper.scala:84)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables$.apply(Analyzer.scala:996)
at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables$.apply(Analyzer.scala:995)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:216)
at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:122)
at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:118)
at scala.collection.immutable.List.foldLeft(List.scala:85)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:213)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:205)
at scala.collection.immutable.List.foreach(List.scala:388)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:205)
at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:196)
at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:190)
at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:155)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:183)
at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:88)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:183)
at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:174)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:228)
at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:173)
at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:73)
at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:143)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:143)
at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:73)
at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:71)
at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:63)
at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:98)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:96)
at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:615)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:610)
at com.netease.arctic.ams.server.terminal.local.LocalTerminalSession.executeStatement(LocalTerminalSession.java:65)
at com.netease.arctic.ams.server.terminal.TerminalSessionContext$ExecutionTask.executeStatement(TerminalSessionContext.java:275)
at com.netease.arctic.ams.server.terminal.TerminalSessionContext$ExecutionTask.execute(TerminalSessionContext.java:238)
at com.netease.arctic.ams.server.terminal.TerminalSessionContext$ExecutionTask.lambda$get$0(TerminalSessionContext.java:199)
at com.netease.arctic.table.TableMetaStore.doAsUgi(TableMetaStore.java:365)
at com.netease.arctic.table.TableMetaStore.lambda$doAs$0(TableMetaStore.java:345)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:360)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1873)
at com.netease.arctic.table.TableMetaStore.doAs(TableMetaStore.java:345)
at com.netease.arctic.ams.server.terminal.TerminalSessionContext$ExecutionTask.get(TerminalSessionContext.java:191)
at com.netease.arctic.ams.server.terminal.TerminalSessionContext$ExecutionTask.get(TerminalSessionContext.java:162)
at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1590)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: NoSuchObjectException(message:xxxxx.xxxxx table not found)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$get_table_result$get_table_resultStandardScheme.read(ThriftHiveMetastore.java:51552)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$get_table_result$get_table_resultStandardScheme.read(ThriftHiveMetastore.java:51520)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$get_table_result.read(ThriftHiveMetastore.java:51451)
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:88)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_get_table(ThriftHiveMetastore.java:1466)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.get_table(ThriftHiveMetastore.java:1452)
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.getTable(HiveMetaStoreClient.java:1332)
at com.netease.arctic.hive.HMSClientImpl.getTable(HMSClientImpl.java:74)
at com.netease.arctic.hive.utils.HiveMetaSynchronizer.lambda$syncHiveSchemaToArctic$0(HiveMetaSynchronizer.java:78)
at org.apache.iceberg.ClientPoolImpl.run(ClientPoolImpl.java:58)
at org.apache.iceberg.ClientPoolImpl.run(ClientPoolImpl.java:51)
at com.netease.arctic.hive.CachedHiveClientPool.lambda$run$3(CachedHiveClientPool.java:86)
at com.netease.arctic.table.TableMetaStore.doAsUgi(TableMetaStore.java:365)
at com.netease.arctic.table.TableMetaStore.lambda$doAs$0(TableMetaStore.java:345)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:360)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1873)
at com.netease.arctic.table.TableMetaStore.doAs(TableMetaStore.java:345)
at com.netease.arctic.hive.CachedHiveClientPool.run(CachedHiveClientPool.java:86)
at com.netease.arctic.hive.utils.HiveMetaSynchronizer.syncHiveSchemaToArctic(HiveMetaSynchronizer.java:78)
... 76 more
```
How should we improve?
According to the stack trace, we should not try to sync the Hive schema to the Mixed Hive table when the underlying Hive table no longer exists.
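One possible shape for the fix (a minimal, self-contained sketch, not the actual Arctic patch; the helper name is invented, and HiveMetaSynchronizer's real plumbing goes through CachedHiveClientPool rather than a bare client):

```java
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class HiveSyncGuardSketch {

  /**
   * Fetches the Hive table, returning null when it no longer exists instead of
   * letting NoSuchObjectException escape and be wrapped in a RuntimeException.
   */
  static Table getHiveTableIfExists(HiveMetaStoreClient client, String database, String table)
      throws TException {
    try {
      return client.getTable(database, table);
    } catch (NoSuchObjectException e) {
      // The backing Hive table was dropped out of band; signal "absent" to the caller.
      return null;
    }
  }
}
```

With a guard like this, syncHiveSchemaToArctic could short-circuit on a null result (for example with a warning log), so loadTable succeeds and the subsequent DROP TABLE proceeds normally.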
Are you willing to submit PR?
Subtasks
No response
Code of Conduct