HBASE-27203 Clean up error-prone findings in hbase-client #4626

Closed (wants to merge 1 commit)
hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase;

import java.util.Collections;
+ import java.util.IdentityHashMap;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -28,7 +29,7 @@ public final class CacheEvictionStats {

private final long evictedBlocks;
private final long maxCacheSize;
- private final Map<byte[], Throwable> exceptions;
+ private final IdentityHashMap<byte[], Throwable> exceptions;

Review comment (Contributor):
I think this will be a bit dangerous if we return this map to the upper layer...

Reply (@apurtell, author, Jul 18, 2022):
The warning here is the one suggesting that maps and identity keys should not be mixed: if the keys use object identity, the map type should be IdentityHashMap. It seems minor, but correct.
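
To illustrate the point under discussion, here is a minimal sketch (not part of this patch; the class and variable names are invented for the demo): `byte[]` inherits identity-based `equals`/`hashCode` from `Object`, so even a plain `HashMap` already compares `byte[]` keys by object identity. Declaring the field as `IdentityHashMap` simply makes that contract explicit in the type, which is what the error-prone warning asks for.

```java
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;

// Hypothetical demo class; not part of the HBase codebase.
public class ByteArrayKeyDemo {
  public static void main(String[] args) {
    byte[] region1 = "region-a".getBytes();
    byte[] region2 = "region-a".getBytes(); // equal contents, distinct object

    // byte[] does not override equals/hashCode, so HashMap lookups on
    // byte[] keys succeed only with the exact same array instance:
    Map<byte[], String> plainMap = new HashMap<>();
    plainMap.put(region1, "boom");
    System.out.println(plainMap.get(region1)); // boom
    System.out.println(plainMap.get(region2)); // null, despite equal contents

    // IdentityHashMap states the identity-keyed contract explicitly:
    Map<byte[], String> identityMap = new IdentityHashMap<>();
    identityMap.put(region1, "boom");
    System.out.println(identityMap.get(region1)); // boom
    System.out.println(identityMap.get(region2)); // null
  }
}
```

As for returning the map to an upper layer: `getExceptions()` wraps it in `Collections.unmodifiableMap(...)`, so callers cannot mutate it, and lookups keep the identity semantics of the backing `IdentityHashMap` because the wrapper delegates `get` to it.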


CacheEvictionStats(CacheEvictionStatsBuilder builder) {
this.evictedBlocks = builder.evictedBlocks;
@@ -44,6 +45,7 @@ public long getMaxCacheSize() {
return maxCacheSize;
}

+ @SuppressWarnings("IdentityHashMapUsage")
public Map<byte[], Throwable> getExceptions() {
return Collections.unmodifiableMap(exceptions);
}
hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java
@@ -17,15 +17,15 @@
*/
package org.apache.hadoop.hbase;

- import java.util.HashMap;
import java.util.Map;
+ import java.util.IdentityHashMap;
import org.apache.yetus.audience.InterfaceAudience;

@InterfaceAudience.Private
public final class CacheEvictionStatsBuilder {

long evictedBlocks = 0;
long maxCacheSize = 0;
- Map<byte[], Throwable> exceptions = new HashMap<>();
+ IdentityHashMap<byte[], Throwable> exceptions = new IdentityHashMap<>();

CacheEvictionStatsBuilder() {
}
@@ -44,6 +44,7 @@ public void addException(byte[] regionName, Throwable ie) {
exceptions.put(regionName, ie);
}

+ @SuppressWarnings("IdentityHashMapUsage")
public CacheEvictionStatsBuilder append(CacheEvictionStats stats) {
this.evictedBlocks += stats.getEvictedBlocks();
this.maxCacheSize += stats.getMaxCacheSize();
@@ -54,4 +55,5 @@ public CacheEvictionStatsBuilder append(CacheEvictionStats stats) {
public CacheEvictionStats build() {
return new CacheEvictionStats(this);
}
+
}
hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
@@ -349,8 +349,8 @@ public static TableState getTableState(Result r) throws IOException {
}

/**
- * @return Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
- * match the regex 'info:merge.*' in array of <code>cells</code>.
+ * Return deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
+ * match the regex 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
@@ -376,8 +376,8 @@ public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
}

/**
- * @return Deserialized regioninfo values taken from column values that match the regex
- * 'info:merge.*' in array of <code>cells</code>.
+ * Return deserialized regioninfo values taken from column values that match the regex
+ * 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
@@ -386,8 +386,8 @@ public static List<RegionInfo> getMergeRegions(Cell[] cells) {
}

/**
- * @return True if any merge regions present in <code>cells</code>; i.e. the column in
- * <code>cell</code> matches the regex 'info:merge.*'.
+ * Return true if any merge regions present in <code>cells</code>; i.e. the column in
+ * <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean hasMergeRegions(Cell[] cells) {
for (Cell cell : cells) {
@@ -399,7 +399,7 @@ public static boolean hasMergeRegions(Cell[] cells) {
}

/**
- * @return True if the column in <code>cell</code> matches the regex 'info:merge.*'.
+ * Return true if the column in <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean isMergeQualifierPrefix(Cell cell) {
// Check to see if has family and that qualifier starts with the merge qualifier 'merge'
hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -59,6 +59,7 @@ private ClientMetaTableAccessor() {
}

@InterfaceAudience.Private
+ @SuppressWarnings("ImmutableEnumChecker")
public enum QueryType {
ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY),
REGION(HConstants.CATALOG_FAMILY),
@@ -101,8 +102,7 @@ public static CompletableFuture<Optional<TableState>> getTableState(AsyncTable<?
}

/**
- * Returns the HRegionLocation from meta for the given region n * @param regionName region we're
- * looking for
+ * Returns the HRegionLocation from meta for the given region we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>>
@@ -127,8 +127,7 @@ public static CompletableFuture<Optional<TableState>> getTableState(AsyncTable<?
}

/**
- * Returns the HRegionLocation from meta for the given encoded region name n * @param
- * encodedRegionName region we're looking for
+ * Returns the HRegionLocation from meta for the given encoded region name we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>>
@@ -167,8 +166,8 @@ private static Optional<TableState> getTableState(Result r) throws IOException {
}

/**
- * Used to get all region locations for the specific table. n * @param tableName table we're
- * looking for, can be null for getting all regions
+ * Used to get all region locations for the specific table we're looking for. Can be null for
+ * getting all regions.
* @return the list of region locations. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
@@ -191,9 +190,8 @@ public static CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(
}

/**
- * Used to get table regions' info and server. n * @param tableName table we're looking for, can
- * be null for getting all regions
- * @param excludeOfflinedSplitParents don't return split parents
+ * Used to get table regions' info and server for the table we're looking for. Can be null for
+ * getting all regions.
* @return the list of regioninfos and server. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
@@ -220,24 +218,12 @@ private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableReg
return future;
}

- /**
- * Performs a scan of META table for given table. n * @param tableName table withing we scan
- * @param type scanned part of meta
- * @param visitor Visitor invoked against each row
- */
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
TableName tableName, QueryType type, final Visitor visitor) {
return scanMeta(metaTable, getTableStartRowForMeta(tableName, type),
getTableStopRowForMeta(tableName, type), type, Integer.MAX_VALUE, visitor);
}

- /**
- * Performs a scan of META table for given table. n * @param startRow Where to start the scan
- * @param stopRow Where to stop the scan
- * @param type scanned part of meta
- * @param maxRows maximum rows to return
- * @param visitor Visitor invoked against each row
- */
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
@@ -383,7 +369,7 @@ public boolean visit(Result r) throws IOException {
abstract void add(Result r);

/**
- * @return Collected results; wait till visits complete to collect all possible results
+ * Return collected results; wait till visits complete to collect all possible results
*/
List<T> getResults() {
return this.results;
@@ -468,6 +454,7 @@ private static Optional<RegionLocations> getRegionLocations(Result r) {
}

/**
+ * Determine the start row for scanning META according to query type
* @param tableName table we're working with
* @return start row for scanning META according to query type
*/
@@ -493,6 +480,7 @@ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type
}

/**
+ * Determine the stop row for scanning META according to query type
* @param tableName table we're working with
* @return stop row for scanning META according to query type
*/
23 changes: 7 additions & 16 deletions hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -34,9 +34,7 @@
public class ClusterId {
private final String id;

- /**
- * New ClusterID. Generates a uniqueid.
- */
+ /** New ClusterID. Generates a unique UUID. */
public ClusterId() {
this(UUID.randomUUID().toString());
}
@@ -45,16 +43,16 @@ public ClusterId(final String uuid) {
this.id = uuid;
}

- /**
- * @return The clusterid serialized using pb w/ pb magic prefix
- */
+ /** Return the cluster id serialized using pb with the pb magic prefix */
public byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
}

/**
* Parse a serialized representation of the cluster id.
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
- * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
+ * @return An instance of {@link ClusterId} made from <code>bytes</code>
+ * @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@@ -74,24 +72,17 @@ public static ClusterId parseFrom(final byte[] bytes) throws DeserializationExce
}
}

- /**
- * @return A pb instance to represent this instance.
- */
+ /** Return a pb instance to represent this instance. */
public ClusterIdProtos.ClusterId convert() {
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
return builder.setClusterId(this.id).build();
}

- /**
- * n * @return A {@link ClusterId} made from the passed in <code>cid</code>
- */
+ /** Return a {@link ClusterId} made from the passed in <code>cid</code> */
public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
return new ClusterId(cid.getClusterId());
}

- /**
- * @see java.lang.Object#toString()
- */
@Override
public String toString() {
return this.id;
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
@@ -70,36 +70,36 @@
public interface ClusterMetrics {

/**
- * @return the HBase version string as reported by the HMaster
+ * Return the HBase version string as reported by the master.
*/
@Nullable
String getHBaseVersion();

/**
- * @return the names of region servers on the dead list
+ * Return the names of region servers on the dead list.
*/
List<ServerName> getDeadServerNames();

/**
- * @return the names of region servers on the decommissioned list
+ * Return the names of region servers on the decommissioned list.
*/
List<ServerName> getDecommissionedServerNames();

/**
- * @return the names of region servers on the live list
+ * Return the names of region servers on the live list.
*/
Map<ServerName, ServerMetrics> getLiveServerMetrics();

/**
- * @return the number of regions deployed on the cluster
+ * Return the number of regions deployed on the cluster.
*/
default int getRegionCount() {
return getLiveServerMetrics().entrySet().stream()
.mapToInt(v -> v.getValue().getRegionMetrics().size()).sum();
}

/**
- * @return the number of requests since last report
+ * Return the number of requests since the last report.
*/
default long getRequestCount() {
return getLiveServerMetrics().entrySet().stream()
@@ -115,7 +115,7 @@ default long getRequestCount() {
ServerName getMasterName();

/**
- * @return the names of backup masters
+ * Return the names of the backup masters.
*/
List<ServerName> getBackupMasterNames();

@@ -148,7 +148,7 @@ default long getLastMajorCompactionTimestamp(byte[] regionName) {
List<ServerName> getServersName();

/**
- * @return the average cluster load
+ * Return the average cluster load.
*/
default double getAverageLoad() {
int serverSize = getLiveServerMetrics().size();
@@ -159,7 +159,7 @@
}

/**
- * Provide region states count for given table. e.g howmany regions of give table are
+ * Provide region states count for given table. e.g how many regions of give table are
* opened/closed/rit etc
* @return map of table to region states count
*/
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -67,13 +67,13 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m
.collect(Collectors.toList()))
.addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
.map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder()
- .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+ .setTableName(ProtobufUtil.toProtoTableName(status.getKey()))
.setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build())
.collect(Collectors.toList()))
.addAllDecommissionedServers(metrics.getDecommissionedServerNames().stream()
.map(ProtobufUtil::toServerName).collect(Collectors.toList()));
if (metrics.getMasterName() != null) {
- builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
+ builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
}
if (metrics.getMasterTasks() != null) {
builder.addAllMasterTasks(metrics.getMasterTasks().stream()
hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
@@ -28,29 +28,25 @@
@InterfaceStability.Evolving
public interface CoprocessorEnvironment<C extends Coprocessor> {

- /** @return the Coprocessor interface version */
+ /** Return the coprocessor interface version */
int getVersion();

- /** @return the HBase version as a string (e.g. "0.21.0") */
+ /** Return the HBase version as a string (e.g. "0.21.0") */
String getHBaseVersion();

- /** @return the loaded coprocessor instance */
+ /** Return the loaded coprocessor instance */
C getInstance();

- /** @return the priority assigned to the loaded coprocessor */
+ /** Return the priority assigned to the loaded coprocessor */
int getPriority();

- /** @return the load sequence number */
+ /** Return the load sequence number */
int getLoadSequence();

- /**
- * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
- * set a configuration.
- */
+ /** Return a read-only Configuration. */
Configuration getConfiguration();

- /**
- * @return the classloader for the loaded coprocessor instance
- */
+ /** Return the classloader for the loaded coprocessor instance */
ClassLoader getClassLoader();

}