Skip to content

Commit

Permalink
HBASE-23313 [hbck2] setRegionState should update Master in-memory sta… (
Browse files Browse the repository at this point in the history
#864)

Signed-off-by: Mingliang Liu <liuml07@apache.org>
Signed-off-by: stack <stack@apache.org>
  • Loading branch information
wchevreuil authored Nov 27, 2019
1 parent 0d7a6b9 commit 636fa2c
Show file tree
Hide file tree
Showing 9 changed files with 162 additions and 11 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -18,22 +18,19 @@
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BypassProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BypassProcedureResponse;
Expand All @@ -45,6 +42,13 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ScheduleServerCrashProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignsResponse;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

import org.apache.yetus.audience.InterfaceAudience;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of
* constructing an HBaseHbck directly.
Expand Down Expand Up @@ -98,15 +102,34 @@ public boolean isAborted() {
public TableState setTableStateInMeta(TableState state) throws IOException {
try {
GetTableStateResponse response = hbck.setTableStateInMeta(
rpcControllerFactory.newController(),
RequestConverter.buildSetTableStateInMetaRequest(state));
rpcControllerFactory.newController(),
RequestConverter.buildSetTableStateInMetaRequest(state));
return TableState.convert(state.getTableName(), response.getTableState());
} catch (ServiceException se) {
LOG.debug("table={}, state={}", state.getTableName(), state.getState(), se);
throw new IOException(se);
}
}

@Override
public List<RegionState> setRegionStateInMeta(List<RegionState> states) throws IOException {
  try {
    // Trace what we are about to push to the master; guarded so the iteration is skipped
    // entirely when debug logging is off.
    if (LOG.isDebugEnabled()) {
      for (RegionState state : states) {
        LOG.debug("region={}, state={}", state.getRegion().getRegionName(), state.getState());
      }
    }
    // Single RPC carrying every requested region-state update.
    MasterProtos.GetRegionStateInMetaResponse response = hbck.setRegionStateInMeta(
      rpcControllerFactory.newController(),
      RequestConverter.buildSetRegionStateInMetaRequest(states));
    // The response echoes the states the regions had in meta before the update.
    return response.getStatesList().stream()
      .map(RegionState::convert)
      .collect(Collectors.toList());
  } catch (ServiceException se) {
    throw new IOException(se);
  }
}

@Override
public List<Long> assigns(List<String> encodedRegionNames, boolean override)
throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
Expand Down Expand Up @@ -54,6 +55,14 @@ public interface Hbck extends Abortable, Closeable {
*/
TableState setTableStateInMeta(TableState state) throws IOException;

/**
 * Update region state in Meta only. No procedures are submitted to manipulate the given region
 * or any other region from same table.
 * @param states list of all region states to be updated in meta
 * @return previous states of the regions, as stored in Meta before this update was applied
 * @throws IOException if the RPC to the master fails
 */
List<RegionState> setRegionStateInMeta(List<RegionState> states) throws IOException;

/**
* Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time
* -- good if many Regions to online -- and it will schedule the assigns even in the case where
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,11 @@ public RegionInfoBuilder setOffline(boolean offLine) {
return this;
}

/**
 * Sets the encoded name for the region being built and returns this builder for chaining.
 * NOTE(review): no validation is performed here; presumably callers pass a name consistent
 * with the region name — confirm against ProtobufUtil.toRegionInfo usage.
 */
public RegionInfoBuilder setEncodedName(String encodedName) {
this.encodedName = encodedName;
return this;
}

public RegionInfo build() {
return new MutableRegionInfo(tableName, startKey, endKey, split,
regionId, replicaId, offLine, regionName, encodedName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2226,6 +2226,14 @@ public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
.setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build();
}

/**
 * Converts a client RegionInfo into a minimal protobuf RegionInfo.
 * Only region id, encoded region name and table name are populated — start/end keys,
 * split, offline and replica id fields are deliberately left unset.
 */
public static HBaseProtos.RegionInfo toProtoRegionInfo(
org.apache.hadoop.hbase.client.RegionInfo regionInfo) {
return HBaseProtos.RegionInfo.newBuilder()
.setRegionId(regionInfo.getRegionId())
.setRegionEncodedName(regionInfo.getEncodedName())
.setTableName(toProtoTableName(regionInfo.getTable())).build();
}

public static List<TableName> toTableNameList(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new ArrayList<>();
Expand Down Expand Up @@ -3145,6 +3153,7 @@ public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.
builder.setOffline(info.isOffline());
builder.setSplit(info.isSplit());
builder.setReplicaId(info.getReplicaId());
builder.setRegionEncodedName(info.getEncodedName());
return builder.build();
}

Expand Down Expand Up @@ -3184,6 +3193,9 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase
if (proto.hasOffline()) {
rib.setOffline(proto.getOffline());
}
if (proto.hasRegionEncodedName()) {
rib.setEncodedName(proto.getRegionEncodedName());
}
return rib.build();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.util.Bytes;
Expand Down Expand Up @@ -132,6 +133,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
Expand Down Expand Up @@ -1258,6 +1260,18 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T
.setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build();
}

/**
 * Builds a protocol buffer SetRegionStateInMetaRequest from the given region states.
 * @param states list of region states to update in Meta
 * @return a SetRegionStateInMetaRequest carrying one protobuf RegionState per input state
 */
public static SetRegionStateInMetaRequest buildSetRegionStateInMetaRequest(
    final List<RegionState> states) {
  final SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder();
  for (RegionState state : states) {
    builder.addStates(state.convert());
  }
  return builder.build();
}

/**
* Creates a protocol buffer GetTableDescriptorsRequest for a single table
*
Expand Down
1 change: 1 addition & 0 deletions hbase-protocol-shaded/src/main/protobuf/HBase.proto
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ message RegionInfo {
optional bool offline = 5;
optional bool split = 6;
optional int32 replica_id = 7 [default = 0];
optional string region_encoded_name = 8;
}

/**
Expand Down
12 changes: 12 additions & 0 deletions hbase-protocol-shaded/src/main/protobuf/Master.proto
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,10 @@ message GetTableStateResponse {
required TableState table_state = 1;
}

/** Response to SetRegionStateInMeta: the states the regions had in Meta before the update. */
message GetRegionStateInMetaResponse {
  repeated RegionState states = 1;
}


message GetClusterStatusRequest {
repeated Option options = 1;
Expand Down Expand Up @@ -1090,6 +1094,10 @@ message SetTableStateInMetaRequest {
required TableState table_state = 2;
}

/** Desired states to write into Meta for a set of regions. */
message SetRegionStateInMetaRequest {
  // NOTE(review): field numbering starts at 2 with no field 1 — legal in proto2 but
  // surprising (likely copied from SetTableStateInMetaRequest); once released it must
  // stay 2 for wire compatibility.
  repeated RegionState states = 2;
}

/** Like Admin's AssignRegionRequest except it can
* take one or more Regions at a time.
*/
Expand Down Expand Up @@ -1152,6 +1160,10 @@ service HbckService {
rpc SetTableStateInMeta(SetTableStateInMetaRequest)
returns(GetTableStateResponse);

/** Update the state of the region in meta only. */
rpc SetRegionStateInMeta(SetRegionStateInMetaRequest)
returns(GetRegionStateInMetaResponse);

/**
* Assign regions.
* Like Admin's assign but works even if the
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
Expand Down Expand Up @@ -197,6 +198,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetRegionStateInMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
Expand Down Expand Up @@ -276,6 +278,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
Expand Down Expand Up @@ -2465,6 +2468,42 @@ public GetTableStateResponse setTableStateInMeta(RpcController controller,
}
}

/**
 * Update the state of the given regions in meta only. This is required by hbck in some
 * situations to clean up stuck assign/unassign region procedures for the table.
 * No assignment procedures are scheduled; only the meta table row and the
 * AssignmentManager's in-memory cache are refreshed.
 *
 * @return previous states of the regions, as read from meta before the update
 */
@Override
public GetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
    SetRegionStateInMetaRequest request) throws ServiceException {
  final GetRegionStateInMetaResponse.Builder builder = GetRegionStateInMetaResponse.newBuilder();
  for(ClusterStatusProtos.RegionState s : request.getStatesList()) {
    try {
      // Resolve the full RegionInfo from meta using only the encoded name sent by the client.
      RegionInfo info = this.master.getAssignmentManager().
          loadRegionFromMeta(s.getRegionInfo().getRegionEncodedName());
      LOG.trace("region info loaded from meta table: {}", info);
      // Capture the state currently cached by the AssignmentManager so it can be returned.
      // NOTE(review): prevState would be null for a region unknown to the AM cache, which
      // would NPE below — confirm loadRegionFromMeta always populates the cache first.
      RegionState prevState = this.master.getAssignmentManager().getRegionStates().
          getRegionState(info);
      RegionState newState = RegionState.convert(s);
      LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
          prevState.getState(), newState.getState());
      // Write the new state directly into the region's info:state column in hbase:meta.
      Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis());
      metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
          Bytes.toBytes(newState.getState().name()));
      List<Put> putList = new ArrayList<>();
      putList.add(metaPut);
      MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList);
      // Loads from meta again to refresh AM cache with the new region state
      this.master.getAssignmentManager().loadRegionFromMeta(info.getEncodedName());
      builder.addStates(prevState.convert());
    } catch (Exception e) {
      // Wrap everything (including runtime failures) so the RPC layer reports it to the client.
      throw new ServiceException(e);
    }
  }
  return builder.build();
}

/**
* Get RegionInfo from Master using content of RegionSpecifier as key.
* @return RegionInfo found by decoding <code>rs</code> or null if none found
Expand Down Expand Up @@ -2834,4 +2873,5 @@ private boolean shouldSubmitSCP(ServerName serverName) {
}
return true;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,11 @@
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;
Expand All @@ -38,6 +41,7 @@
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
import org.apache.hadoop.hbase.procedure2.Procedure;
Expand All @@ -48,6 +52,7 @@
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
Expand Down Expand Up @@ -182,6 +187,36 @@ public void testSetTableStateInMeta() throws Exception {
prevState.isDisabled());
}

@Test
public void testSetRegionStateInMeta() throws Exception {
  Hbck hbck = getHbck();
  try (Admin admin = TEST_UTIL.getAdmin()) {
    final List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
    final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    final List<RegionState> prevStates = new ArrayList<>();
    final List<RegionState> newStates = new ArrayList<>();
    final Map<String, Pair<RegionState, RegionState>> regionsMap = new HashMap<>();
    // Record each region's current state and build a CLOSED replacement for it.
    for (RegionInfo region : regions) {
      RegionState before = am.getRegionStates().getRegionState(region);
      RegionState closed = RegionState.createForTesting(region, RegionState.State.CLOSED);
      prevStates.add(before);
      newStates.add(closed);
      regionsMap.put(region.getEncodedName(), new Pair<>(before, closed));
    }
    // The call must report, per region, the state it had before the update.
    for (RegionState returned : hbck.setRegionStateInMeta(newStates)) {
      RegionState expected = regionsMap.get(returned.getRegion().getEncodedName()).getFirst();
      assertEquals(expected.getState(), returned.getState());
    }
    // The master's in-memory cache must now reflect the CLOSED state for every region.
    for (RegionInfo region : regions) {
      RegionState cached = am.getRegionStates().getRegionState(region.getEncodedName());
      assertEquals(regionsMap.get(region.getEncodedName()).getSecond().getState(),
        cached.getState());
    }
    // Restore the original states so subsequent tests see an unmodified cluster.
    hbck.setRegionStateInMeta(prevStates);
  }
}

@Test
public void testAssigns() throws Exception {
Hbck hbck = getHbck();
Expand Down

0 comments on commit 636fa2c

Please sign in to comment.