Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,16 @@ public boolean dropPartition(String catName, String dbName, String tableName, St
}
}

@Override
public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
    throws MetaException, NoSuchObjectException {
  // Test hook: simulate an event-listener failure when the flag is cleared.
  if (!shouldEventSucceed) {
    throw new RuntimeException("Event failed.");
  }
  // Flag set: delegate to the real implementation unchanged.
  super.dropPartitions(catName, dbName, tblName, partNames);
}

@Override
public Table alterTable(String catName, String dbName, String name, Table newTable, String queryValidWriteIds)
throws InvalidObjectException, MetaException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,14 +118,15 @@ public void testMetaDataCounts() throws Exception {

Assert.assertEquals(1, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_DATABASES).getCount());
Assert.assertEquals(3, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_TABLES).getCount());
Assert.assertEquals(3, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_PARTITIONS).getCount());
//skip counting the dropped partitions while dropping the database tempdb
Assert.assertEquals(1, Metrics.getRegistry().getCounters().get(MetricsConstants.DELETE_TOTAL_PARTITIONS).getCount());

//to test initial metadata count metrics.
Assert.assertEquals(initDbCount + 1,
Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_DATABASES).getValue());
Assert.assertEquals(initTblCount + 4,
Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_TABLES).getValue());
Assert.assertEquals(initPartCount + 6,
Assert.assertEquals(initPartCount + 8,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why would these metrics change since we are doing an optimization?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

On dropping a table, the new change no longer decreases the TOTAL_PARTITIONS gauge:
https://github.com/apache/hive/pull/5851/files#diff-90c669c961ab250c01087a409b31126cce7ac0672c40f465e7ee262ed7bcdaddL3280
This TOTAL_PARTITIONS counter is misleading, and makes no sense to me when HMS is deployed in HA mode.

Metrics.getRegistry().getGauges().get(MetricsConstants.TOTAL_PARTITIONS).getValue());

}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@

import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
Expand Down Expand Up @@ -86,7 +87,8 @@ private static class NameFilter extends AbstractFilter {
private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
join(new String[]{"org.apache.hadoop.mapreduce.JobSubmitter",
"org.apache.hadoop.mapreduce.Job", "SessionState", "ReplState", Task.class.getName(),
TezTask.class.getName(), Driver.class.getName(), BasicStatsTask.class.getName()}));
TezTask.class.getName(), Driver.class.getName(), BasicStatsTask.class.getName(),
ThriftHiveMetaStoreClient.class.getName()}));

/* Patterns that are included in performance logging level.
* In performance mode, show execution and performance logger messages.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2157,14 +2157,14 @@ private boolean validatePartitions(List<Partition> partitions, String dbName, St
}

// check that all new partitions are belonging to tables located in the same database
if (dbName != null && !dbName.equals(p.getDbName())) {
if (dbName != null && !dbName.equalsIgnoreCase(p.getDbName())) {
throw new MetaException("Partition tables doesn't belong to the same database "
+ Arrays.toString(partitions.toArray()));
} else {
dbName = p.getDbName();
}
// check if all new partitions are part of the same table
if (tableName != null && !tableName.equals(p.getTableName())) {
if (tableName != null && !tableName.equalsIgnoreCase(p.getTableName())) {
throw new MetaException("New partitions doesn't belong to the same table "
+ Arrays.toString(partitions.toArray()));
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
Expand Down Expand Up @@ -1028,8 +1027,12 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext
new ArrayList<>(Arrays.asList(new_part)), false);
addPartitionsReq.setCatName(new_part.getCatName());
addPartitionsReq.setEnvironmentContext(envContext);
Partition p = client.add_partitions_req(addPartitionsReq).getPartitions().get(0);
return HiveMetaStoreClientUtils.deepCopy(p);

List<Partition> new_parts = client.add_partitions_req(addPartitionsReq).getPartitions();
if (new_parts != null && !new_parts.isEmpty()) {
return HiveMetaStoreClientUtils.deepCopy(new_parts.getFirst());
}
return null;
}

@Override
Expand Down Expand Up @@ -1076,10 +1079,13 @@ public List<Partition> add_partitions(
AddPartitionsResult result = client.add_partitions_req(req);
if (needResults) {
List<Partition> new_parts = HiveMetaStoreClientUtils.deepCopyPartitions(result.getPartitions());
if (skipColumnSchemaForPartition) {
new_parts.forEach(partition -> partition.getSd().setCols(result.getPartitionColSchema()));
if (new_parts != null && !new_parts.isEmpty()) {
if (skipColumnSchemaForPartition) {
new_parts.forEach(partition -> partition.getSd().setCols(result.getPartitionColSchema()));
}
return FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, new_parts);
}
return FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, new_parts);
return new ArrayList<>();
}
return null;
}
Expand Down Expand Up @@ -1518,7 +1524,22 @@ public boolean createType(Type type) throws AlreadyExistsException,

@Override
public void dropDatabase(DropDatabaseRequest req) throws TException {
// NOTE(review): this first call appears to be the pre-change (removed) line merged
// in by the diff rendering — confirm it is not actually duplicated in the source.
client.drop_database_req(req);
// Request async drop only when the metastore is remote; a local (embedded)
// metastore performs the drop synchronously.
req.setAsyncDrop(!isLocalMetaStore());
AsyncOperationResp resp = client.drop_database_req(req);
// Subsequent polls identify the in-flight operation by the id from the first response.
req.setId(resp.getId());
try {
// Poll until the server reports completion or this thread is interrupted.
// NOTE(review): no sleep/backoff between polls — presumably the server call
// blocks or throttles; otherwise this busy-waits. Confirm server behavior.
while (!resp.isFinished() && !Thread.currentThread().isInterrupted()) {
resp = client.drop_database_req(req);
// Surface server-side progress messages to the client log.
if (resp.getMessage() != null) {
LOG.info(resp.getMessage());
}
}
} finally {
// If we exited without completion (e.g. interrupt), ask the server to
// cancel the outstanding drop so it does not run unattended.
if (!resp.isFinished()) {
req.setCancel(true);
client.drop_database_req(req);
}
}
}

@Override
Expand Down Expand Up @@ -1679,7 +1700,22 @@ public void dropTable(String catName, String dbname, String name, boolean delete
dropTableReq.setCatalogName(catName);
dropTableReq.setDropPartitions(true);
dropTableReq.setEnvContext(envContext);
client.drop_table_req(dropTableReq);
dropTableReq.setAsyncDrop(!isLocalMetaStore());
AsyncOperationResp resp = client.drop_table_req(dropTableReq);
dropTableReq.setId(resp.getId());
try {
while (!resp.isFinished() && !Thread.currentThread().isInterrupted()) {
resp = client.drop_table_req(dropTableReq);
if (resp.getMessage() != null) {
LOG.info(resp.getMessage());
}
}
} finally {
if (!resp.isFinished()) {
dropTableReq.setCancel(true);
client.drop_table_req(dropTableReq);
}
}
}

@Override
Expand Down
Loading