diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveTableSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveTableSink.java
index 54dae1df135122..22ceca331e5947 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveTableSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveTableSink.java
@@ -48,12 +48,10 @@ import org.apache.doris.thrift.THiveTableSink;
 
 import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
@@ -203,7 +201,7 @@ private void setPartitionValues(THiveTableSink tSink) throws AnalysisException {
 
         List<THivePartition> partitions = new ArrayList<>();
-        List<HivePartition> hivePartitions;
+        List<HivePartition> hivePartitions = new ArrayList<>();
         if (targetTable.isPartitionedTable()) {
             // Get partitions from cache instead of HMS client (similar to HiveScanNode)
             HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr()
@@ -213,14 +211,6 @@ private void setPartitionValues(THiveTableSink tSink) throws AnalysisException {
 
             List<List<String>> partitionValuesList = new ArrayList<>(partitionValues.getPartitionValuesMap().values());
             hivePartitions = cache.getAllPartitionsWithCache(targetTable, partitionValuesList);
-        } else {
-            // Non-partitioned table, create dummy partition
-            hivePartitions = Lists.newArrayList();
-            StorageDescriptor sd = targetTable.getRemoteTable().getSd();
-            HivePartition dummyPartition = new HivePartition(targetTable.getOrBuildNameMapping(), true,
-                    sd.getInputFormat(), sd.getLocation(), Lists.newArrayList(),
-                    sd.getParameters() != null ? sd.getParameters() : new HashMap<>());
-            hivePartitions.add(dummyPartition);
         }
 
         // Convert HivePartition to THivePartition (same logic as before)
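The `= new ArrayList<>()` initializer on `hivePartitions` is what makes deleting the entire `else` branch legal: with a bare declaration, Java's definite-assignment rule would reject the method, since the variable would be unassigned on the non-partitioned path. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative stand-ins, not part of the patch:

```java
import java.util.ArrayList;
import java.util.List;

public class DefiniteAssignmentSketch {
    // Stand-in for setPartitionValues: returns the partition list that the
    // real code would go on to convert into THivePartition entries.
    static List<String> collectPartitions(boolean partitionedTable) {
        // Initializing here mirrors the patch. With only a declaration,
        // javac would report "variable hivePartitions might not have been
        // initialized" once the 'else' branch is removed.
        List<String> hivePartitions = new ArrayList<>();
        if (partitionedTable) {
            // Stand-in for the HiveMetaStoreCache lookup in the real code.
            hivePartitions = List.of("dt=2024-01-01", "dt=2024-01-02");
        }
        // Non-partitioned tables now return an empty list instead of a dummy
        // partition built from the table's StorageDescriptor.
        return hivePartitions;
    }

    public static void main(String[] args) {
        System.out.println(collectPartitions(true));  // [dt=2024-01-01, dt=2024-01-02]
        System.out.println(collectPartitions(false)); // []
    }
}
```

With the dummy partition gone, the conversion step that follows the hunk ("Convert HivePartition to THivePartition") simply iterates an empty list for non-partitioned tables, so the sink carries no partition entries instead of one synthetic entry.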